/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
/*
 * protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
	      (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
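
/*
 * Illustrative sketch (not part of the original file): a layout driver
 * module is expected to fill in a pnfs_layoutdriver_type and register it
 * from its module init, then unregister it on exit.  The example_* names
 * below are hypothetical; per pnfs_register_layoutdriver() above, a nonzero
 * .id and both .alloc_lseg and .free_lseg are required, and .owner is used
 * for module reference counting.
 *
 *	static struct pnfs_layoutdriver_type example_layoutdriver = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_layoutdriver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&example_layoutdriver);
 *	}
 */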
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
	put_rpccred(lo->plh_lc_cred);
	return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}
void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
/* last octet in a range */
static u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
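
/*
 * Note on the two helpers above: a range length of NFS4_MAX_UINT64 means
 * "to end of file", so when start + len would wrap past 2^64 the helpers
 * saturate to NFS4_MAX_UINT64 instead of wrapping.  For example,
 * end_offset(4096, NFS4_MAX_UINT64) overflows and therefore returns
 * NFS4_MAX_UINT64, i.e. the range is treated as unbounded.
 */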
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       struct pnfs_layout_range *range,
	       gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}
/* Initiates a LAYOUTRETURN(FILE) */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0;

	dprintk("--> %s\n", __func__);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("%s: no layout to return\n", __func__);
		return status;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
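
/*
 * Worked example (a sketch, not from the original file): for two whole-file
 * ranges starting at offset 0, the offset and length tests above both give
 * d == 0, so the iomode test decides and an IOMODE_RW range sorts before an
 * IOMODE_READ one.  pnfs_insert_layout() below therefore places RW lsegs
 * ahead of READ lsegs, and pnfs_find_lseg() sees them first.
 */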
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	enum pnfs_iomode access_type;
	gfp_t gfp_flags;

	/* We assume that pg_ioflags == 0 iff we're reading a page */
	if (pgio->pg_ioflags == 0) {
		access_type = IOMODE_READ;
		gfp_flags = GFP_KERNEL;
	} else {
		access_type = IOMODE_RW;
		gfp_flags = GFP_NOFS;
	}

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_count != prev->wb_bytes)
			return true;
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   req_offset(prev),
						   pgio->pg_count,
						   access_type,
						   gfp_flags);
		if (pgio->pg_lseg == NULL)
			return true;
	}

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_write_done(struct nfs_write_data *data)
{
	int status;

	if (!data->pnfs_error) {
		pnfs_set_layoutcommit(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		/* Don't layoutcommit on error; the server will need to
		 * perform a file recovery.
		 */
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(data->inode)->flags);
		pnfs_return_layout(data->inode);
	}

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
				    data->mds_ops, NFS_FILE_SYNC);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
		       const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/*
 * Called by non rpc-based layout drivers
 */
int
pnfs_ld_read_done(struct nfs_read_data *data)
{
	int status;

	if (!data->pnfs_error) {
		__nfs4_read_done_cb(data);
		data->mds_ops->rpc_call_done(&data->task, data);
		data->mds_ops->rpc_release(data);
		return 0;
	}

	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR)
		pnfs_return_layout(data->inode);

	dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
		data->pnfs_error);
	status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
				   data->mds_ops);
	return status ? : -EAGAIN;
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, wdata->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}