// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
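/*
 * For illustration (not upstream code): opening a file with
 * open(path, O_WRONLY | O_CREAT | O_TRUNC) reaches the helper above with
 * flags = O_WRONLY|O_CREAT|O_TRUNC.  The O_ACCMODE switch selects
 * CEPH_O_WRONLY, and the ceph_sys2wire() invocations then map the
 * remaining bits one by one, so the result is:
 *
 *	wire_flags = CEPH_O_WRONLY | CEPH_O_CREAT | CEPH_O_TRUNC;
 *
 * Any bits still left in 'flags' afterwards (e.g. O_NONBLOCK) have no
 * wire equivalent and are only reported via dout().
 */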
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
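/*
 * Sizing note (illustrative): with ITER_GET_BVECS_PAGES == 64, the
 * on-stack buffer in __iter_get_bvecs() below is
 * 64 * sizeof(struct page *), i.e. 512 bytes on a 64-bit build --
 * large enough to amortize the cost of repeated iov_iter_get_pages()
 * calls while staying stack-friendly.
 */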
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}
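/*
 * Worked example (hypothetical values): pinning 6000 bytes starting at
 * offset 512 within the first page yields bytes = 6000, start = 512,
 * and the loop above emits:
 *
 *	bvecs[0] = { .bv_page = pages[0], .bv_len = 3584, .bv_offset = 512 };
 *	bvecs[1] = { .bv_page = pages[1], .bv_len = 2416, .bv_offset = 0 };
 *
 * i.e. only the first bvec carries a non-zero offset; 'start' is reset
 * to 0 after the first iteration.
 */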
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ci, fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ci, fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->meta_err = errseq_sample(&ci->i_meta_err);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	return 0;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through */
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
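/*
 * Example of the fast path above (hypothetical cap state): a read-only
 * open wants, roughly, the Fr and Fc file caps.  If the MDS already
 * issued e.g. "pAsLsXsFscr" on this inode, (issued & wanted) == wanted
 * holds, so no OPEN request is sent at all: we only bump the fmode
 * refcount, and when the mds_wanted set has to grow we poke the MDS
 * asynchronously via ceph_check_caps().
 */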
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		u64 i_size;
		bool more;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		if (unlikely(iov_iter_is_pipe(to))) {
			ret = iov_iter_get_pages_alloc(to, &pages, len,
						       &page_off);
			if (ret <= 0) {
				ceph_osdc_put_request(req);
				ret = -ENOMEM;
				break;
			}
			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
			if (ret < len) {
				len = ret;
				osd_req_op_extent_update(req, 0, len);
				more = false;
			}
		} else {
			num_pages = calc_pages_for(off, len);
			page_off = off & ~PAGE_MASK;
			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
			if (IS_ERR(pages)) {
				ceph_osdc_put_request(req);
				ret = PTR_ERR(pages);
				break;
			}
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);
		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		if (unlikely(iov_iter_is_pipe(to))) {
			if (ret > 0) {
				iov_iter_advance(to, ret);
				off += ret;
			} else {
				iov_iter_advance(to, 0);
			}
			ceph_put_page_vector(pages, num_pages, false);
		} else {
			int idx = 0;
			size_t left = ret > 0 ? ret : 0;

			while (left > 0) {
				size_t len, copied;
				page_off = off & ~PAGE_MASK;
				len = min_t(size_t, left, PAGE_SIZE - page_off);
				copied = copy_page_to_iter(pages[idx++],
							   page_off, len, to);
				off += copied;
				left -= copied;
				if (copied < len) {
					ret = -EFAULT;
					break;
				}
			}
			ceph_release_page_vector(pages, num_pages);
		}

		if (ret < 0) {
			if (ret == -EBLACKLISTED)
				fsc->blacklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (ret >= 0 &&
		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
			*retry_op = CHECK_EOF;
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}
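/*
 * Short-read example (hypothetical numbers): reading 8192 bytes at
 * off 0 when the OSD only returns 4096 and i_size is 6144 gives
 * zlen = min(8192 - 4096, 6144 - 0 - 4096) = 2048, so the page vector
 * is zeroed for [4096, 6144) and ret becomes 6144.  The gap between the
 * OSD's short result and EOF therefore reads back as zeroes, and only
 * a true EOF produces a short read for the caller.
 */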
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0, 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item,
				      &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
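/*
 * Note on the AIO gate above (illustrative numbers): a 12 MB direct
 * write starting at pos == iocb->ki_pos and spanning three 4 MB
 * objects is only queued asynchronously when it fits in a single OSD
 * request (len == count) or stays entirely within i_size; otherwise
 * each sub-request takes the synchronous start+wait path, so a short
 * write in the middle cannot leave an undetected hole behind.
 */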
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (ret < 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	inode_inc_iversion_raw(inode);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out:
	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_write(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
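/*
 * Worked example (hypothetical, 4 KiB pages): offset = 1000,
 * length = 10000.  nearly = round_up(1000, 4096) = 4096, so bytes
 * [1000, 4096) are zeroed inside the first page; of the remaining
 * 6904 bytes, one full page [4096, 8192) is truncated from the page
 * cache; and the final 2808 bytes are zeroed in place in the last page.
 */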
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	if (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
	}
	return ret;
}
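/*
 * Period-boundary example (hypothetical layout): stripe_count = 2 and
 * object_size = 4 MB give object_set_size = 8 MB.  For offset = 5 MB,
 * nearly = 8 MB, so [5 MB, 8 MB) is zeroed with partial-object ops;
 * whole 8 MB periods are then handled one object per stripe via
 * ceph_zero_partial_object(..., NULL) (truncate/delete), and any tail
 * falls back to a final partial zero.
 */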
static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps, and an error is
 * returned if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got, NULL);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
		if (ret < 0)
			return ret;
		/*... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}

static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}
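/*
 * The retry dance above, step by step (illustrative): take Fw/Fb on the
 * destination, then *try* Fr on the source without blocking.  If that
 * fails, drop the destination caps, block in ceph_get_caps() for the
 * source's Fr, drop it again, and retry from the top -- so we never
 * sleep waiting for one file's caps while holding the other's, which is
 * what could deadlock two clients copying in opposite directions.
 */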
/*
 * This function does several size-related checks, returning an error if:
 *  - source file is smaller than off+len
 *  - destination file size is not OK (inode_newsize_ok())
 *  - max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	loff_t endoff = 0, size;
	ssize_t ret;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen, object_size;
	int src_got = 0, dst_got = 0, err, dirty;
	bool do_final_copy = false;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies.  Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	size = i_size_read(dst_inode);
	endoff = dst_off + len;

	/* Drop dst file cached pages */
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    endoff >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);

	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at the src_off
	 */
	if (src_objoff) {
		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		if (ret < 0) {
			dout("do_splice_direct returned %zd\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	object_size = src_ci->i_layout.object_size;
	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		err = ceph_osdc_copy_from(
			&src_fsc->client->osdc,
			src_ci->i_vino.snap, 0,
			&src_oid, &src_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
			&dst_oid, &dst_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
		if (err) {
			dout("ceph_osdc_copy_from returned %d\n", err);
			if (!ret)
				ret = err;
			goto out_caps;
		}
		len -= object_size;
		src_off += object_size;
		dst_off += object_size;
		ret += object_size;
	}

	if (len)
		/* We still need one final local copy */
		do_final_copy = true;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (endoff > size) {
		int caps_flags = 0;

		/* Let the MDS know about dst file size change */
		if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_NODELAY;
		if (ceph_inode_set_size(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_AUTHONLY;
		if (caps_flags)
			ceph_check_caps(dst_ci, caps_flags, NULL);
	}

	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dst_ci->i_inline_version = CEPH_INLINE_NONE;
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	if (do_final_copy) {
		err = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, len, flags);
		if (err < 0) {
			dout("do_splice_direct returned %d\n", err);
			goto out;
		}
		len -= err;
		ret += err;
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}
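/*
 * Offload example (hypothetical, 4 MB objects): src_off = 6 MB,
 * dst_off = 2 MB, len = 11 MB.  Both offsets sit 2 MB into an object
 * (src_objoff == dst_objoff), so the first 2 MB goes through
 * do_splice_direct(); the next two full objects are copied entirely
 * OSD-side with ceph_osdc_copy_from(); and the remaining 1 MB tail is
 * spliced locally as the "final copy".
 */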
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fallocate	= ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};