/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
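/*
 * Note on the request_module() call above (explanatory, not from the
 * original source): when no driver is registered for the requested
 * layout type, the client asks kmod to load a module aliased
 * "<LAYOUT_NFSV4_1_MODULE_PREFIX>-<id>" and then repeats the lookup.
 * For example (illustrative), a server advertising layout type 1
 * (LAYOUT4_NFSV4_1_FILES) triggers an autoload of whichever module
 * registers id 1, after which find_pnfs_driver(1) succeeds.
 */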
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
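/*
 * Example of the retry window above (illustrative, not from the original
 * source): with PNFS_LAYOUTGET_RETRY_TIMEOUT = 120*HZ, a fail bit set at
 * time T keeps pnfs_layout_io_test_failed() returning true while
 * plh_retry_timestamp still lies within [jiffies - 120s, jiffies]; once
 * jiffies advances past T + 120s the bit is cleared and LAYOUTGET is
 * retried.
 */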
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
			struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *s;

	if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return false;

	list_for_each_entry(s, &lo->plh_segs, pls_list)
		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;

	return true;
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
{
	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return false;
	lo->plh_return_iomode = 0;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo);
	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
	return true;
}
static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_hdr *lo, struct inode *inode)
{
	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo, lseg)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		stateid = lo->plh_stateid;
		iomode = lo->plh_return_iomode;
		send = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	/* Handle the case where refcount != 1 */
	if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
		return;

	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	/* Do we need a layoutreturn? */
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
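/*
 * Example (illustrative, not from the original source):
 * end_offset(4096, NFS4_MAX_UINT64) wraps around to 4095, which is less
 * than start, so the result is clamped to NFS4_MAX_UINT64 -- the value
 * the range helpers below treat as "extends to EOF".
 */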
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
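/*
 * Example (illustrative, not from the original source): a recall range
 * of { .iomode = IOMODE_ANY, .offset = 0, .length = NFS4_MAX_UINT64 }
 * intersects every cached segment, so should_free_lseg() matches them
 * all; a recall of { IOMODE_READ, 0, 4096 } matches only IOMODE_READ
 * segments whose range overlaps the first 4096 bytes.
 */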
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		pnfs_clear_retry_layoutget(lo);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
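/*
 * Worked example (illustrative, not from the original source): with
 * s1 = 2 and s2 = 0xfffffffe, the unsigned difference s1 - s2 is 4, so
 * (s32)(s1 - s2) > 0 and s1 is treated as newer even though it is
 * numerically smaller.  The signed cast keeps the comparison correct
 * across seqid wraparound, provided the two ids are within 2^31 of
 * each other.
 */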
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
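/*
 * The barrier blocks processing of LAYOUTGET replies carrying a stateid
 * that is not newer than one already seen.  Example (illustrative, not
 * from the original source): with plh_barrier == 7, a reply whose seqid
 * is 6 or 7 is "blocked" and the reply is forgotten, while a reply with
 * seqid 8 is accepted.
 */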
static bool
pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
		      struct pnfs_layout_range *range)
{
	return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
		(lo->plh_return_iomode == IOMODE_ANY ||
		 lo->plh_return_iomode == range->iomode);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
			struct pnfs_layout_range *range, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget)) ||
		pnfs_layout_returning(lo, range);
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, range, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	} else
		pnfs_layout_clear_fail_bit(lo,
				pnfs_iomode_to_fail_bit(range->iomode));

	return lseg;
}
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		pnfs_clear_layoutreturn_waitbit(lo);
		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	send = pnfs_prepare_layoutreturn(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* Don't return layout if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	pnfs_clear_retry_layoutget(lo);
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_noroc;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	return true;

out_noroc:
	if (lo) {
		stateid = lo->plh_stateid;
		if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					   &lo->plh_flags))
			layoutreturn = pnfs_prepare_layoutreturn(lo);
	}
	spin_unlock(&ino->i_lock);
	if (layoutreturn) {
		pnfs_layoutcommit_inode(ino, true);
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
	}
	return false;
}
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	nfs4_stateid stateid;
	u32 current_seqid;
	bool layoutreturn = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			spin_unlock(&ino->i_lock);
			return true;
		}
	}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	stateid = lo->plh_stateid;
	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
				   &lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo);
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);

	spin_unlock(&ino->i_lock);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
	return false;
}
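/*
 * Note on the worst-case barrier above (illustrative, not from the
 * original source): if the current layout stateid has seqid 5 and two
 * LAYOUTGETs are still outstanding, the barrier is set to 7.  The two
 * in-flight replies can carry at most seqids 6 and 7, neither of which
 * is newer than the barrier, so pnfs_layout_stateid_blocked() will
 * forget them after the close.
 */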
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
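/*
 * Resulting sort order (illustrative, not from the original source):
 * segments are kept in ascending offset order; for equal offsets longer
 * ranges sort first, and for equal offset/length an IOMODE_RW segment
 * sorts before an IOMODE_READ one -- which is how lookups end up
 * preferring RW layouts, as the comment above pnfs_lseg_range_cmp()
 * describes.
 */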
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
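/*
 * Example of the threshold logic above (illustrative, not from the
 * original source): if the server supplied both a THRESHOLD_RD hint
 * (say rd_sz = 1 MB) and a THRESHOLD_RD_IO hint, a READ is routed to
 * the MDS only when the file size is below rd_sz *and* the cumulative
 * read_io counter is below rd_io_sz.  With only one of the two hints
 * set, crossing that single threshold is enough to return true.
 */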
/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key)
{
	if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
		return 1;
	return nfs_wait_bit_killable(key);
}

static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   pnfs_layoutget_retry_bit_wait,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

lookup_again:
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode) &&
	    !pnfs_should_retry_layoutget(lo))
		goto out_unlock;

	first = list_empty(&lo->plh_segs);
	if (first) {
		/* The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		/* Check to see if the layout for the given range
		 * already exists
		 */
		lseg = pnfs_find_lseg(lo, &arg);
		if (lseg)
			goto out_unlock;
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (!lseg && pnfs_should_retry_layoutget(lo) &&
	    test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo, &arg, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	pnfs_clear_retry_layoutget(lo);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
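/*
 * Note on the argument rounding above (illustrative, not from the
 * original source): with 4 KB pages, a request for 100 bytes at offset
 * 4200 is widened to offset 4096, length 4096 -- the LAYOUTGET range is
 * page-aligned unless the caller asked for "to EOF" (NFS4_MAX_UINT64),
 * which is passed through unchanged.
 */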
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget_reply;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
static void
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				struct pnfs_layout_range *return_range)
{
	struct pnfs_layout_segment *lseg, *next;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return;

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(&lseg->pls_range, return_range)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
			mark_lseg_invalid(lseg, tmp_list);
		}
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);

	spin_lock(&inode->i_lock);
	/* set failure bit so that pnfs path will be retried later */
	pnfs_layout_set_fail_bit(lo, iomode);
	if (lo->plh_return_iomode == 0)
		lo->plh_return_iomode = range.iomode;
	else if (lo->plh_return_iomode != range.iomode)
		lo->plh_return_iomode = IOMODE_ANY;
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   GFP_KERNEL);
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
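/*
 * Example (illustrative, not from the original source): with a layout
 * segment covering [0, 8192) and a request starting at offset 6144,
 * nfs_generic_pg_test() may allow 4096 bytes, but only
 * seg_left = 8192 - 6144 = 2048 bytes remain in the segment, so 'size'
 * is clamped to 2048.
 */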
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}
1888 pnfs_try_to_write_data(struct nfs_pgio_header
*hdr
,
1889 const struct rpc_call_ops
*call_ops
,
1890 struct pnfs_layout_segment
*lseg
,
1893 struct inode
*inode
= hdr
->inode
;
1894 enum pnfs_try_status trypnfs
;
1895 struct nfs_server
*nfss
= NFS_SERVER(inode
);
1897 hdr
->mds_ops
= call_ops
;
1899 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__
,
1900 inode
->i_ino
, hdr
->args
.count
, hdr
->args
.offset
, how
);
1901 trypnfs
= nfss
->pnfs_curr_ld
->write_pagelist(hdr
, how
);
1902 if (trypnfs
!= PNFS_NOT_ATTEMPTED
)
1903 nfs_inc_stats(inode
, NFSIOS_PNFS_WRITE
);
1904 dprintk("%s End (trypnfs:%d)\n", __func__
, trypnfs
);
1909 pnfs_do_write(struct nfs_pageio_descriptor
*desc
,
1910 struct nfs_pgio_header
*hdr
, int how
)
1912 const struct rpc_call_ops
*call_ops
= desc
->pg_rpc_callops
;
1913 struct pnfs_layout_segment
*lseg
= desc
->pg_lseg
;
1914 enum pnfs_try_status trypnfs
;
1916 trypnfs
= pnfs_try_to_write_data(hdr
, call_ops
, lseg
, how
);
1917 if (trypnfs
== PNFS_NOT_ATTEMPTED
)
1918 pnfs_write_through_mds(desc
, hdr
);
1921 static void pnfs_writehdr_free(struct nfs_pgio_header
*hdr
)
1923 pnfs_put_lseg(hdr
->lseg
);
1924 nfs_pgio_header_free(hdr
);
1928 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor
*desc
)
1930 struct nfs_pgio_mirror
*mirror
= nfs_pgio_current_mirror(desc
);
1932 struct nfs_pgio_header
*hdr
;
1935 hdr
= nfs_pgio_header_alloc(desc
->pg_rw_ops
);
1937 desc
->pg_completion_ops
->error_cleanup(&mirror
->pg_list
);
1940 nfs_pgheader_init(desc
, hdr
, pnfs_writehdr_free
);
1942 hdr
->lseg
= pnfs_get_lseg(desc
->pg_lseg
);
1943 ret
= nfs_generic_pgio(desc
, hdr
);
1945 pnfs_do_write(desc
, hdr
, desc
->pg_ioflags
);
1949 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages
);
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}
/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;
	int err = 0;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		err = pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
		pnfs_read_through_mds(desc, hdr);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
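/*
 * Note on lastbytewritten above (explanatory, not from the original
 * source): the LAYOUTCOMMIT argument is the offset of the last byte
 * written, not a length, hence "end_pos - 1".  For example
 * (illustrative), after writing bytes [0, 4096), end_pos is 4096 and
 * lastbytewritten is 4095.
 */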
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif