fs/ceph/file.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
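                /* readdir offsets 0 and 1 are reserved for "." and "..";
                 * start the client-side readdir position at 2 */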
                cf->next_offset = 2;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = NULL;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);
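        /* fmode (above) is ceph's internal open mode; wanted is the set of
         * cap bits that open mode implies this client would like to hold */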

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        if (flags & (O_CREAT|O_TRUNC))
                parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        iput(parent_inode);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                                struct nameidata *nd, int mode,
                                int locked_dir)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct file *file;
        struct ceph_mds_request *req;
        struct dentry *ret;
        int err;
        int flags = nd->intent.open.flags;

        dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
             dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        }
        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out;
        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        if (err)
                goto out;
        file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open);
        if (IS_ERR(file))
                err = PTR_ERR(file);
out:
        ret = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);
        dout("ceph_lookup_open result=%p\n", ret);
        return ret;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof, bool o_direct,
                        unsigned long buf_align)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int io_align, page_align;
        int left, pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;
        io_align = off & ~PAGE_MASK;
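        /*
         * io_align is the in-page offset of the starting file position.
         * For O_DIRECT the destination is the caller's buffer, which begins
         * at in-page offset buf_align, so each chunk's page_align (offset
         * into the first page of the page vector) follows how far pos has
         * advanced plus buf_align; for buffered sync reads the pages mirror
         * file offsets, so page_align is simply pos within its page.
         */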

more:
        if (o_direct)
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
        else
                page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret > 0) {
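                /*
                 * didpages is how many page slots this reply consumed; we
                 * advance the page vector by that much before reading the
                 * next object.  If the data accumulated so far ends before
                 * the offset this reply starts at, zero the gap first.
                 */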
                int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;

                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
                        ceph_zero_page_vector_range(page_align + read,
                                                    pos - off - read, pages);
                }
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe? */
                if (left && hit_stripe)
                        goto more;
        }

        if (was_short) {
                /* did we bounce off eof? */
                if (pos + left > inode->i_size)
                        *checkeof = 1;

                /* zero trailing bytes (inside i_size) */
                if (left > 0 && pos < inode->i_size) {
                        if (pos + left > inode->i_size)
                                left = inode->i_size - pos;

                        dout("zero tail %d\n", left);
                        ceph_zero_page_vector_range(page_align + read, left,
                                                    pages);
                        read += left;
                }
        }

        if (ret >= 0)
                ret = read;
        dout("striped_read returns %d\n", ret);
        return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct page **pages;
        u64 off = *poff;
        int num_pages, ret;

        dout("sync_read on file %p %llu~%u %s\n", file, off, len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_DIRECT) {
                num_pages = calc_pages_for((unsigned long)data, len);
                pages = ceph_get_direct_page_vector(data, num_pages, true);
        } else {
                num_pages = calc_pages_for(off, len);
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        }
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                goto done;

        ret = striped_read(inode, off, len, pages, num_pages, checkeof,
                           file->f_flags & O_DIRECT,
                           (unsigned long)data & ~PAGE_MASK);

        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
                ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;

done:
        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, true);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
        spin_lock(&ci->i_unsafe_lock);
        list_del_init(&req->r_unsafe_item);
        spin_unlock(&ci->i_unsafe_lock);
        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t *offset)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
        long long unsigned pos;
        u64 len;
        int written = 0;
        int flags;
        int do_sync = 0;
        int check_caps = 0;
        int page_align, io_align;
        unsigned long buf_align;
        int ret;
        struct timespec mtime = CURRENT_TIME;

        if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u %s\n", file, *offset,
             (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_APPEND)
                pos = i_size_read(inode);
        else
                pos = *offset;

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;
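
        /*
         * having written back the range above, drop any cached pages over
         * it so buffered readers don't see stale data after this
         * sync/direct write.
         */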
        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + left) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
        if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
                flags |= CEPH_OSD_FLAG_ACK;
        else
                do_sync = 1;
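
        /*
         * When neither O_SYNC nor O_DIRECT is set we also request an ACK
         * (acknowledged before commit); sync_write_commit then tracks the
         * write on the unsafe list until the ONDISK commit reply arrives.
         * Otherwise we rely on the ONDISK reply alone (do_sync).
         */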

        /*
         * we may need to do multiple writes here if we span an object
         * boundary.  this isn't atomic, unfortunately.  :(
         */
more:
        io_align = pos & ~PAGE_MASK;
        buf_align = (unsigned long)data & ~PAGE_MASK;
        len = left;
        if (file->f_flags & O_DIRECT) {
                /* write from beginning of first page, regardless of
                   io alignment */
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
                num_pages = calc_pages_for((unsigned long)data, len);
        } else {
                page_align = pos & ~PAGE_MASK;
                num_pages = calc_pages_for(pos, len);
        }
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), pos, &len,
                                    CEPH_OSD_OP_WRITE, flags,
                                    ci->i_snap_realm->cached_context,
                                    do_sync,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    &mtime, false, 2, page_align);
        if (!req)
                return -ENOMEM;

        if (file->f_flags & O_DIRECT) {
                pages = ceph_get_direct_page_vector(data, num_pages, false);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                /*
                 * throw out any page cache pages in this range.  this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos+len) | (PAGE_CACHE_SIZE-1));
        } else {
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
                ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                if ((file->f_flags & O_SYNC) == 0) {
                        /* get a second commit callback */
                        req->r_safe_callback = sync_write_commit;
                        req->r_own_pages = 1;
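                        /*
                         * r_own_pages: the OSD client now owns this page
                         * vector and frees it when the request is put, so
                         * we must not release it again below.
                         */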
                }
        }
        req->r_pages = pages;
        req->r_num_pages = num_pages;
        req->r_inode = inode;

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                if (req->r_safe_callback) {
                        /*
                         * Add to inode unsafe list only after we
                         * start_request so that a tid has been assigned.
                         */
                        spin_lock(&ci->i_unsafe_lock);
                        list_add_tail(&req->r_unsafe_item,
                                      &ci->i_unsafe_writes);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                }

                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret < 0 && req->r_safe_callback) {
                        spin_lock(&ci->i_unsafe_lock);
                        list_del_init(&req->r_unsafe_item);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
                }
        }

        if (file->f_flags & O_DIRECT)
                ceph_put_page_vector(pages, num_pages, false);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
                pos += len;
                written += len;
                left -= len;
                data += len;
                if (left)
                        goto more;

                ret = written;
                *offset = pos;
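                /* if we extended the file, record the new size locally and,
                 * if needed, tell the MDS (auth caps only) */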
                if (pos > i_size_read(inode))
                        check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                        ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
                                        NULL);
        }
        return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;

        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
        __ceph_do_pending_vmtruncate(inode);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
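        /* endoff of -1: unlike the write path, a read never needs to grow
         * the file, so no end-offset (max size) check is requested */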
        if (ret < 0)
                goto out;
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
        else
                ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);
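
        /*
         * A short sync read may mean we hit a hole (missing object) rather
         * than EOF; checkeof asks us to verify the real size with a
         * getattr.  If the file turns out to extend past where we stopped,
         * go around again and read the rest.
         */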
        if (checkeof && ret >= 0) {
                int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
                        dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
                        read += ret;
                        base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        loff_t endoff = pos + iov->iov_len;
        int want, got = 0;
        int ret, err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                return -ENOSPC;
        __ceph_do_pending_vmtruncate(inode);
        dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             inode->i_size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
        if (ret < 0)
                goto out_put;

        dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC)) {
                ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
                        &iocb->ki_pos);
        } else {
                /*
                 * buffered write; drop Fw early to avoid slow
                 * revocation if we get stuck on balance_dirty_pages
                 */
                int dirty;

                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);

                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
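                /*
                 * for O_SYNC/IS_SYNC semantics, or when the cluster is
                 * nearly full, flush what we just buffered right away.
                 */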
                if ((ret >= 0 || ret == -EIOCBQUEUED) &&
                    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
                     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                        err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
                        if (err < 0)
                                ret = err;
                }

                if (dirty)
                        __mark_inode_dirty(inode, dirty);
                goto out;
        }

        if (ret >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

out_put:
        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

out:
        if (ret == -EOLDSNAPC) {
                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
                goto retry_snap;
        }

        return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);

        if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        switch (origin) {
        case SEEK_END:
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = inode->i_size;
                break;
        }

        if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
                offset = -EINVAL;
                goto out;
        }

        /* Special lock needed here? */
        if (offset != file->f_pos) {
                file->f_pos = offset;
                file->f_version = 0;
        }

out:
        mutex_unlock(&inode->i_mutex);
        return offset;
}

const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
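        /* plain read(2)/write(2) are serviced by do_sync_read/do_sync_write,
         * which route through the aio entry points below */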
        .aio_read = ceph_aio_read,
        .aio_write = ceph_aio_write,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
};