// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
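/*
 * Worked example of the accounting above (values assumed for
 * illustration): start with i_wrbuffer_ref == i_wrbuffer_ref_head == 3
 * (three dirty pages, no snaps).  A snapshot notification arrives: a
 * new ceph_cap_snap is appended to i_cap_snaps with capsnap->dirty = 3
 * and i_wrbuffer_ref_head resets to 0 (i_wrbuffer_ref stays 3).  A
 * subsequent write dirties one more page in the new head context:
 * i_wrbuffer_ref becomes 4, i_wrbuffer_ref_head becomes 1.  Writeback
 * must then flush the three capsnap pages before the head page.
 */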
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
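/*
 * Worked example (assumes 4K pages, so PAGE_SHIFT - 10 == 2): with
 * congestion_kb = 8192, writeback congests the bdi once 8192 >> 2 =
 * 2048 pages are in flight, and un-congests below 2048 - 512 = 1536.
 * The 25% gap provides hysteresis so the congested bit doesn't flap.
 */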
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}
/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}
/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}
static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}
/* read a single page, without unlocking it. */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
	     vino.ino, vino.snap, filp, off, len, page, page->index);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);

	err = ceph_osdc_start_request(osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, err);

	ceph_osdc_put_request(req);
	dout("readpage result %d\n", err);

	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		ceph_fscache_readpage_cancel(inode, page);
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}
static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}
/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
	if (rc == -EBLOCKLISTED)
		ceph_inode_to_client(inode)->blocklisted = true;

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
					(u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}

	ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, rc);

	kfree(osd_data->pages);
}
/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = lru_to_page(page_list);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
					true, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = lru_to_page(page_list);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}
/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *fi = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(fi);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
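/*
 * Example of the selection above (seqs assumed for illustration): with
 * capsnaps of seq 5 and seq 8 holding dirty pages and more dirty pages
 * in the head context, this returns the seq-5 context; seq 8 and the
 * head are not flushable until every seq-5 page has been written back.
 */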
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}
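/*
 * Worked example (assumed values, 4K pages): for the page at offset
 * 12288 with start == 12288 and i_size == 14000, end starts at 14000,
 * stays below the page-end clamp of 12288 + 4096 = 16384, and the
 * function returns 14000 - 12288 = 1712 -- a short tail write instead
 * of a full page.
 */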
/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		end_page_writeback(page);
		return PTR_ERR(req);
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > PAGE_SIZE);
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, rc);

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}
/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until we write all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, PAGE_SIZE);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
							     CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += PAGE_SIZE;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages dend - startone, rc = %d\n", rc);
	return rc;
}
/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
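/*
 * Example (seq values assumed): if the oldest context with dirty data
 * has seq 5, then a snapc of seq 4 was already written out (its
 * capsnap no longer exists), seq 5 is writeable right now, and seq 6
 * is neither, so only the last returns false.
 */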
/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		return ERR_PTR(-EIO);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}
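/*
 * Illustrative walk-through (snap seqs assumed): a write hits a page
 * still dirty under snapc seq 3 while the head context is seq 4.  If
 * seq 3 is currently the oldest flushable context, the loop above
 * writes the page in place and retries; if something older than seq 3
 * still has dirty data, the seq-3 context is returned so the caller
 * can block until it becomes writeable.
 */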
/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct page *page = NULL;
	pgoff_t index = pos >> PAGE_SHIFT;
	int pos_in_page = pos & ~PAGE_MASK;
	int r = 0;

	dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len);

	for (;;) {
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page) {
			r = -ENOMEM;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (snapc) {
			if (IS_ERR(snapc)) {
				r = PTR_ERR(snapc);
				break;
			}
			unlock_page(page);
			put_page(page);
			page = NULL;
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
						context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r != 0)
				break;
			continue;
		}

		if (PageUptodate(page)) {
			dout(" page %p already uptodate\n", page);
			break;
		}

		/*
		 * In some cases we don't need to read at all:
		 * - write that lies completely beyond EOF
		 * - write that covers the page from start to EOF or beyond it
		 */
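		/*
		 * Worked example (assumed values, 4K page): with
		 * i_size = 10000 and a write of len = 2000 at pos = 8192,
		 * pos_in_page is 0 and pos + len = 10192 >= i_size, so the
		 * second case below applies: the bytes outside [0, 2000)
		 * are zeroed and no read is issued.
		 */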
		if ((pos_in_page == 0 && len == PAGE_SIZE) ||
		    (pos >= i_size_read(inode)) ||
		    (pos_in_page == 0 && (pos + len) >= i_size_read(inode))) {
			zero_user_segments(page, 0, pos_in_page,
					   pos_in_page + len, PAGE_SIZE);
			break;
		}

		/*
		 * We need to read it. If we get back -EINPROGRESS, then the page was
		 * handed off to fscache and it will be unlocked when the read completes.
		 * Refind the page in that case so we can reacquire the page lock. Otherwise
		 * we got a hard error or the read was completed synchronously.
		 */
		r = ceph_do_readpage(file, page);
		if (r != -EINPROGRESS)
			break;
	}

	if (r < 0) {
		if (page) {
			unlock_page(page);
			put_page(page);
		}
	} else {
		*pagep = page;
	}
	return r;
}
/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}
const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
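/*
 * Usage note (the wiring lives outside this file): nothing here
 * installs these ops directly; the inode-fill path elsewhere in
 * fs/ceph is expected to point i_mapping->a_ops at &ceph_aops for
 * regular files, after which all page cache activity for file data
 * funnels through the handlers above.
 */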
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
			    &got, &pinned_page);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n",
		     inode, off, (size_t)PAGE_SIZE,
		     ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %x\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}
/*
 * Reuse write_begin here for simplicity.
 */
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, err);

out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}
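/*
 * How the uninline write above guards against races (behavior as read
 * from the code, versions assumed for illustration): op 0 is a
 * CMPXATTR that succeeds only while the object's "inline_version"
 * xattr compares less than ours, op 1 writes the data, and op 2
 * records the new version.  If two clients race to uninline version 7,
 * the loser's compare fails and the request returns -ECANCELED, which
 * is treated as success above because the data already reached the
 * object.
 */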
static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}
enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}
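/*
 * The probe above derives permissions empirically: a STAT read that
 * returns 0 or -ENOENT proves read access, a CREATE with EXCL that
 * returns 0 or -EEXIST proves write access, and -EPERM on either just
 * leaves that bit unset.  The result is cached in pool_perm_tree so
 * each pool/namespace pair is probed at most once per mount.
 */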
int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have
		 * already been deleted. Skip check to avoid creating
		 * orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}