// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
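/*
 * Illustrative walk-through of the accounting above: with 5 dirty
 * pages and no snaps, i_wrbuffer_ref == i_wrbuffer_ref_head == 5.
 * When a snapshot is taken, the head count moves to the new capsnap:
 *
 *     i_wrbuffer_ref = 5, i_wrbuffer_ref_head = 0, capsnap->dirty = 5
 *
 * Pages dirtied after that point count against the head context
 * again, so dirtying two more pages gives i_wrbuffer_ref = 7 and
 * i_wrbuffer_ref_head = 2, while the capsnap still owns its 5.
 */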
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
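/*
 * Worked example: congestion_kb is in KiB and the thresholds are in
 * pages, so the shift by (PAGE_SHIFT - 10) converts KiB to pages.
 * With 4 KiB pages (PAGE_SHIFT == 12) and congestion_kb == 8192, the
 * "on" threshold is 8192 >> 2 == 2048 pages and the "off" threshold
 * is 2048 - 512 == 1536 pages, i.e. 25% hysteresis between setting
 * and clearing bdi congestion.
 */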
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}
/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
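/*
 * Note: the snap context reference taken by ceph_set_page_dirty() and
 * stored in page->private is dropped again once the page is cleaned,
 * either in ceph_invalidatepage() or after writeback completes in
 * writepage_nounlock()/writepages_finish().
 */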
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}
static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
static int ceph_sync_readpages(struct ceph_fs_client *fsc,
			       struct ceph_vino vino,
			       struct ceph_file_layout *layout,
			       u64 off, u64 *plen,
			       u32 truncate_seq, u64 truncate_size,
			       struct page **pages, int num_pages,
			       int page_align)
{
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
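/*
 * Example of the boundary shortening above: a read cannot span RADOS
 * objects, so a 1 MiB read that starts 512 KiB before the end of an
 * object has *plen shortened to 512 KiB by ceph_osdc_new_request(),
 * and the caller is expected to issue another request for the rest.
 */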
/*
 * read a single page, without unlocking it.
 */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int err;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_sync_readpages(fsc, ceph_vino(inode),
				  &ci->i_layout, off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}
static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}
/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
	if (rc == -EBLACKLISTED)
		ceph_inode_to_client(inode)->blacklisted = true;

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
					(u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}
	kfree(osd_data->pages);
}
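/*
 * Short-read example for the zeroing above (assuming 4 KiB pages): a
 * 2-page read that returns 6000 bytes leaves page 0 fully valid
 * (bytes == 6000 when it is processed), then bytes drops to
 * 6000 - 4096 == 1904 for page 1, so page 1 is zeroed from offset
 * 1904 to PAGE_SIZE before being marked uptodate.
 */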
/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = lru_to_page(page_list);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
					true, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = lru_to_page(page_list);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}
/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);
	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(fi);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
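/*
 * Example of the snap ordering this enforces: if capsnaps with seqs 5
 * and 8 both have dirty pages, the seq-5 context is returned first and
 * only its pages may be written back; the seq-8 context and then the
 * head context become eligible once no older capsnap has dirty pages.
 */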
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}
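/*
 * For example, with i_size == 10000 and a 4 KiB page at offset 8192,
 * end is clamped to min(10000, 8192 + 4096) == 10000 and the function
 * returns 10000 - 8192 == 1808 bytes; writing past that point would be
 * beyond the size that applies to this snap context.
 */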
/*
 * do a synchronous write on N pages
 */
static int ceph_sync_writepages(struct ceph_fs_client *fsc,
				struct ceph_vino vino,
				struct ceph_file_layout *layout,
				struct ceph_snap_context *snapc,
				u64 off, u64 len,
				u32 truncate_seq, u64 truncate_size,
				struct timespec64 *mtime,
				struct page **pages, int num_pages)
{
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err, len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;

	dout("writepage %p idx %lu\n", page, page->index);

	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_sync_writepages(fsc, ceph_vino(inode),
				   &ci->i_layout, snapc, page_off, len,
				   ceph_wbc.truncate_seq,
				   ceph_wbc.truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLACKLISTED)
			fsc->blacklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until we write all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY,
						max_pages - locked_pages);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, PAGE_SIZE);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
							CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += PAGE_SIZE;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							!!pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, !!pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		pool = NULL;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				pool = fsc->wb_pagevec_pool;
				pages = mempool_alloc(pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages dend - startone, rc = %d\n", rc);
	return rc;
}
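/*
 * Batching example for the loop above, assuming the common default
 * layout with a 4 MiB object/stripe unit: if the first dirty page maps
 * to file offset 0, ceph_calc_file_object_mapping() yields
 * xlen == 4 MiB, so strip_unit_end covers page indexes 0..1023 (with
 * 4 KiB pages) and pages past that boundary are left for the next OSD
 * request.
 */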
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_MASK;
	int pos_in_page = pos & ~PAGE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		unlock_page(page);
		return -EIO;
	}

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_unlock;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_SIZE)
		return 0;

	/* past end of file? */
	i_size = i_size_read(inode);

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = ceph_do_readpage(file, page);
	if (r < 0) {
		if (r == -EINPROGRESS)
			return -EAGAIN;
		goto fail_unlock;
	}
	goto retry_locked;
fail_unlock:
	unlock_page(page);
	return r;
}
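/*
 * Note on the zeroing branch above: a write that starts at the page
 * boundary and reaches (or passes) EOF does not need the page's old
 * contents, so the bytes around [pos_in_page, end_in_page) are zeroed
 * in place instead of reading the page from the OSD first.
 */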
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			put_page(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}
const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n",
		     inode, off, (size_t)PAGE_SIZE,
		     ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %x\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}
/*
 * Reuse write_begin here for simplicity.
 */
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		err = ceph_update_writeable_page(vma->vm_file, off, len, page);
		if (err >= 0) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
		}
	} while (err == -EAGAIN);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}
static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLACKLISTED)
			fsc->blacklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLACKLISTED)
			fsc->blacklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}
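/*
 * The two probe requests above encode the permission test: the STAT
 * read proves read access (-ENOENT still means the read was permitted,
 * the object just doesn't exist), and the exclusive CREATE proves
 * write access (-EEXIST likewise means the write was permitted).
 * Anything other than -EPERM is treated as a hard error.
 */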
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}