#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
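
/*
 * Roughly, these modes map onto the entry points below: buffered I/O
 * goes through generic_file_read_iter()/generic_perform_write() (see
 * ceph_read_iter() and ceph_write_iter()), synchronous I/O through
 * ceph_sync_read()/ceph_sync_write(), and direct I/O through
 * ceph_direct_read_write().
 */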

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
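
/*
 * Worked example (illustrative, assuming 4 KiB pages): iovecs
 * {base=0x10000, len=0x2000} and {base=0x30000, len=0x1000} combine,
 * since 0x10000+0x2000 and 0x30000 are both page aligned, yielding a
 * 0x3000-byte page vector; with a second base of 0x30100 the loop
 * would stop after the first iovec.
 */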

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages) {
		pages = vmalloc(sizeof(*pages) * npages);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;
		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
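
/*
 * Sizing example (illustrative, 4 KiB pages): with align = 0x100 and
 * nbytes = 0x2000 the vector must cover bytes 0x100..0x20ff, which
 * touches three pages, so calc_pages_for() yields npages = 3 even
 * though the transfer is only two pages worth of data.
 */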

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* First file open request creates the cookie, we want to keep
		 * this cookie around for the lifetime of the inode so as not
		 * to have to worry about fscache register / revoke / operation
		 * races.
		 *
		 * Also, if we know the operation is going to invalidate data
		 * (non readonly) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
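
/*
 * Example of the local-open shortcut (illustrative): an O_RDONLY open
 * maps to CEPH_FILE_MODE_RD, whose wanted set typically includes
 * CEPH_CAP_FILE_RD and CEPH_CAP_FILE_CACHE.  If those bits are
 * already issued, ceph_open() just takes an fmode reference and
 * returns without an MDS round trip; a grown wanted set is reported
 * asynchronously via ceph_check_caps().
 */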

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	loff_t i_size;
	int page_align, pages_left;
	int read, ret;
	struct page **page_pos;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		int didpages;
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = (off & ~PAGE_MASK) + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe and need continue*/
		if (left && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
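
/*
 * Striping example (illustrative, default 4 MiB objects): a 6 MiB
 * read at offset 2 MiB is clipped by the OSD client to this_len =
 * 2 MiB (hit_stripe), so the loop advances pos/page_pos and issues a
 * second read for the next object.  A short reply inside i_size only
 * indicates a hole in one object, hence the zero-fill rather than
 * returning a short read.
 */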

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = iov_iter_count(i);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	num_pages = calc_pages_for(off, len);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	ret = striped_read(inode, off, len, pages,
			   num_pages, checkeof);
	if (ret > 0) {
		int l, k = 0;
		size_t left = ret;

		while (left) {
			size_t page_off = off & ~PAGE_MASK;
			size_t copy = min_t(size_t, left,
					    PAGE_SIZE - page_off);
			l = copy_page_to_iter(pages[k++], page_off, copy, i);
			off += l;
			left -= l;
			if (l < copy)
				break;
		}
	}
	ceph_release_page_vector(pages, num_pages);

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	int write;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req,
				  struct ceph_msg *msg)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, false);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}
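
/*
 * Zero-fill example for the single-request case above (illustrative):
 * a direct read of 8192 bytes at ki_pos 0 with i_size 6000 returning
 * rc = 4096 gives endoff = 4096 < i_size, so zlen is clamped to
 * 6000 - 4096 = 1904 and total_len becomes 6000; bytes beyond i_size
 * are left for the caller to treat as EOF instead of being zeroed.
 */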

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	req->r_base_oloc = orig_req->r_base_oloc;
	req->r_base_oid = orig_req->r_base_oid;

	req->r_ops[0] = orig_req->r_ops[0];
	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
				snapc, CEPH_NOSNAP, &aio_req->mtime);

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		BUG_ON(ret == -EOLDSNAPC);
		req->r_result = ret;
		ceph_aio_complete_req(req, NULL);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
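
/*
 * Lifecycle of the callback above: once the request is handed to the
 * messenger, unsafe == true (take an Fw cap reference and track the
 * request on i_unsafe_writes so fsync and friends can wait on it);
 * when the ONDISK reply arrives, or the request dies early, unsafe ==
 * false (untrack and drop the cap reference).
 */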

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		ret = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + count) >> PAGE_CACHE_SHIFT);
		if (ret < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret);

		flags = CEPH_OSD_FLAG_ORDERSNAP |
			CEPH_OSD_FLAG_ONDISK |
			CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /*include a 'startsync' command*/
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_CACHE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, false);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		while (!list_empty(&aio_req->osd_reqs)) {
			req = list_first_entry(&aio_req->osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				BUG_ON(ret == -EOLDSNAPC);
				req->r_result = ret;
				ceph_aio_complete_req(req, NULL);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
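
/*
 * Fan-out sketch (illustrative, 4 MiB objects): an async direct write
 * of 12 MiB builds three OSD requests, each counted in pending_reqs
 * and queued on aio_req->osd_reqs.  The submitter returns
 * -EIOCBQUEUED, and ceph_aio_complete() invokes iocb->ki_complete()
 * only when the final request drops pending_reqs to zero.
 */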

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret)
			break;

		pos += len;
		written += len;

		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
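
/*
 * Boundary-split example (illustrative, 4 MiB objects): a 6 MiB sync
 * write at offset 0 becomes two OSD writes of 4 MiB and 2 MiB;
 * ceph_osdc_new_request() clips len to the object on each pass, and
 * only a fully successful pass advances pos/written.  There is no
 * cross-object rollback, hence the caveat about atomicity above.
 */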

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_read_iter(iocb, to);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		page_cache_release(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_CACHE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_CACHE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u"
			     "got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			inode_lock(inode);
			ceph_put_snap_context(snapc);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		loff_t old_size = i_size_read(inode);
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (i_size_read(inode) > old_size)
			ceph_fscache_update_objectsize(inode);
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	int ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= i_size) {
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return offset;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
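
/*
 * Worked example (illustrative, 4 KiB pages): zeroing offset = 1000,
 * length = 10000 zeroes the partial head 1000..4095 in place, drops
 * the one fully covered page cache page at 4096..8191 via truncate,
 * and zeroes the partial tail 8192..10999.
 */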

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
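
/*
 * Period arithmetic example (illustrative): with object_size = 4 MiB
 * and stripe_count = 2, an object set spans object_set_size = 8 MiB
 * of file data.  Punching 20 MiB at offset 6 MiB zeroes the partial
 * head 6..8 MiB, deletes the whole sets covering 8..16 and 16..24 MiB
 * (one ceph_zero_partial_object(..., NULL) per stripe), then zeroes
 * the partial tail 24..26 MiB.
 */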

static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};