// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
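/*
 * Illustrative example (not in the original source): for
 * open(..., O_RDWR|O_CREAT|O_TRUNC) the wire encoding above becomes
 * CEPH_O_RDWR | CEPH_O_CREAT | CEPH_O_TRUNC; any flag bit without a
 * CEPH_* counterpart is left behind in 'flags' and only shows up in the
 * "unused open flags" debug message rather than being sent to the MDS.
 */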
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}
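/*
 * Illustrative note: for a 10000-byte iovec starting at page offset
 * 0x100, iov_iter_get_pages() pins three pages; the first bio_vec gets
 * bv_offset 0x100 and bv_len PAGE_SIZE - 0x100, the middle one covers a
 * full page, and the tail carries the remainder -- 'start' is zeroed
 * after the first page so later bio_vecs begin at offset 0.
 */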
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
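/*
 * Typical caller pattern (as used by ceph_direct_read_write() below):
 *
 *	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 *	...
 *	osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
 *	...
 *	put_bvecs(bvecs, num_pages, should_dirty);
 */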
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
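/*
 * Note: ceph_open() and ceph_renew_caps() call this with create_mode 0;
 * only ceph_atomic_open() passes a real creation mode, since that is the
 * only path here that can create a file on the MDS.
 */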
static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through */
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
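/*
 * Summary of the fast path above: a read-only open can be satisfied by
 * caps from any MDS, while an open for write needs a cap from the auth
 * MDS.  Either way the MDS is only told asynchronously (via
 * ceph_check_caps()) when the wanted set grows beyond what is already
 * issued or known to the MDS.
 */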
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit stripe and need continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
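/*
 * Worked example (assuming the default layout of 4 MB objects with
 * stripe_count 1): a 6 MB read at offset 3 MB crosses an object
 * boundary, so the first ceph_osdc_readpages() returns at most 1 MB
 * (this_len < len, "HITSTRIPE") and the loop continues into the next
 * object(s) for the rest.  A short OSD result that still lies inside
 * i_size is zero-filled instead of being returned as a short read.
 */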
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0); /* keep iov_iter sane */
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);
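/*
 * Lifecycle note: one ceph_aio_request fans out into one or more OSD
 * requests; pending_reqs counts them, and whichever completes last (see
 * ceph_aio_complete()) delivers the final byte count or error to the
 * caller's kiocb.  ceph_aio_work only exists to bounce an -EOLDSNAPC
 * resend to a workqueue, where rebuilding the request may block.
 */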
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
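/*
 * Design note on the retry above: -EOLDSNAPC means the OSD considered
 * the write's snap context stale.  The work item picks up the newest
 * context (the last pending cap snap, else i_head_snapc) and resends
 * the same extent op under it.
 */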
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos + len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
					      len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
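/*
 * Note: in the AIO case this function returns -EIOCBQUEUED after
 * starting the OSD requests; the eventual byte count (or error) reaches
 * the caller through ki_complete() in ceph_aio_complete().
 */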
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
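/*
 * retry_op state machine (see the enum above striped_read()): CHECK_EOF
 * means a short sync/direct read must be re-checked against i_size (a
 * hole or racing append may leave more to read), READ_INLINE redirects
 * the read to inline data fetched via getattr, and HAVE_RETRIED marks
 * the second pass so the retry cannot loop forever.
 */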
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
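/*
 * Example: with 4 KB pages, zeroing 10000 bytes at offset 3000 zeroes
 * the 1096-byte tail of page 0 in place, truncates pages 1-2 out of the
 * page cache entirely, and zeroes the first 712 bytes of page 3.
 */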
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
*file
, int mode
,
1741 loff_t offset
, loff_t length
)
1743 struct ceph_file_info
*fi
= file
->private_data
;
1744 struct inode
*inode
= file_inode(file
);
1745 struct ceph_inode_info
*ci
= ceph_inode(inode
);
1746 struct ceph_cap_flush
*prealloc_cf
;
1753 if (mode
!= (FALLOC_FL_KEEP_SIZE
| FALLOC_FL_PUNCH_HOLE
))
1756 if (!S_ISREG(inode
->i_mode
))
1759 prealloc_cf
= ceph_alloc_cap_flush();
1765 if (ceph_snap(inode
) != CEPH_NOSNAP
) {
1770 if (ci
->i_inline_version
!= CEPH_INLINE_NONE
) {
1771 ret
= ceph_uninline_data(file
, NULL
);
1776 size
= i_size_read(inode
);
1778 /* Are we punching a hole beyond EOF? */
1781 if ((offset
+ length
) > size
)
1782 length
= size
- offset
;
1784 if (fi
->fmode
& CEPH_FILE_MODE_LAZY
)
1785 want
= CEPH_CAP_FILE_BUFFER
| CEPH_CAP_FILE_LAZYIO
;
1787 want
= CEPH_CAP_FILE_BUFFER
;
1789 ret
= ceph_get_caps(ci
, CEPH_CAP_FILE_WR
, want
, endoff
, &got
, NULL
);
1793 ceph_zero_pagecache_range(inode
, offset
, length
);
1794 ret
= ceph_zero_objects(inode
, offset
, length
);
1797 spin_lock(&ci
->i_ceph_lock
);
1798 ci
->i_inline_version
= CEPH_INLINE_NONE
;
1799 dirty
= __ceph_mark_dirty_caps(ci
, CEPH_CAP_FILE_WR
,
1801 spin_unlock(&ci
->i_ceph_lock
);
1803 __mark_inode_dirty(inode
, dirty
);
1806 ceph_put_cap_refs(ci
, got
);
1808 inode_unlock(inode
);
1809 ceph_free_cap_flush(prealloc_cf
);
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};