/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;
	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
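
/*
 * Illustrative sketch (not part of this file): a layout driver module
 * typically registers itself from its module_init hook with a static
 * ops table whose id, alloc_lseg and free_lseg fields are all set,
 * since pnfs_register_layoutdriver() rejects anything less.  The
 * mylayout_* names below are hypothetical:
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *
 *	static int __init mylayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&mylayout_type);
 *	}
 *
 * Note that the "%s-%u" name passed to request_module() in
 * set_pnfs_layoutdriver() means the module should also carry a
 * MODULE_ALIAS matching LAYOUT_NFSV4_1_MODULE_PREFIX "-<id>" so it can
 * be loaded on demand.
 */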

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
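
/*
 * Note on lseg reference counting (summarizing the code above): an lseg
 * is created with pls_refcount == 1, and that initial reference is
 * owned by the NFS_LSEG_VALID bit.  get_lseg()/put_lseg() add and drop
 * references taken for I/O; mark_lseg_invalid() below clears VALID and
 * drops the initial reference, so the lseg is freed as soon as the last
 * I/O reference goes away.
 */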

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
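
/*
 * For example, in mark_matching_lsegs_invalid() above: if three lsegs
 * match the recall iomode but one of them still has outstanding I/O
 * holding a reference, invalid == 3 and removed == 2, so the function
 * returns 1 -- the number of invalidated lsegs that stay on the list
 * until their I/O completes.
 */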

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier) > 0)
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
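
/*
 * Worked example of the unsigned wraparound arithmetic above: with
 * newseq == 0x00000005 and plh_barrier == 0xfffffff0, newseq - barrier
 * == 0x15, far below 3 << 29 (0x60000000), so the barrier is left
 * alone -- it still counts as "behind" despite the larger raw value.
 * If instead the gap grows past 0x60000000 (say barrier == 100 while
 * newseq has advanced to 0x70000000), the barrier is pulled up to
 * newseq - 2**30, keeping it comfortably within the 2**31 window that
 * signed comparisons like (int)(x - y) can order correctly.
 */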

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
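
/*
 * The lget parameter offsets the plh_outstanding test: when called from
 * within the send_layoutget() path, one outstanding LAYOUTGET -- our
 * own, counted via atomic_inc in pnfs_update_layout() -- is expected,
 * so the threshold is 1 rather than 0.
 */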

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data,
		       sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   u32 iomode)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
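
/*
 * For example, cmp_layout(IOMODE_READ, IOMODE_RW) returns -1 ("RW sorts
 * before READ") and cmp_layout(IOMODE_RW, IOMODE_READ) returns 1, so
 * pnfs_insert_layout() below always lands RW segments ahead of READ
 * segments in plh_segs.
 */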

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->pls_list, &lo->plh_segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length);
	}
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}
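
/*
 * The unlock/alloc/relock dance above exists because the GFP_KERNEL
 * allocation may sleep, which is not allowed under i_lock.  While the
 * lock is dropped another thread can install a layout header first,
 * hence the "Won the race?" recheck before publishing the new one.
 */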

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}
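
/*
 * In other words, only an RW request is picky: a cached RW lseg can
 * satisfy a READ request, but a cached READ lseg can never satisfy an
 * RW request, in which case pnfs_find_lseg() finds nothing and a new
 * RW layout is requested from the server.
 */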

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = get_lseg(lseg);
			break;
		}
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, iomode);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	lseg = send_layoutget(lo, ctx, iomode);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Verify we got what we asked for.
	 * Note that because the xdr parsing only accepts a single
	 * element array, this can fail even if the server is behaving
	 * correctly.
	 */
	if (lgp->args.range.iomode > res->range.iomode ||
	    res->range.offset != 0 ||
	    res->range.length != NFS4_MAX_UINT64) {
		status = -EINVAL;
		goto out;
	}
	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_READ);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
}

static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev,
			      struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_RW);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
}

enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Currently there is only one (whole file) write lseg.
 */
static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
{
	struct pnfs_layout_segment *lseg, *rv = NULL;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			rv = lseg;
	return rv;
}

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->args.offset + wdata->res.count;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
		wdata->lseg->pls_lc_cred =
			get_rpccred(wdata->args.context->state->owner->so_cred);
		mark_inode_dirty_sync(wdata->inode);
		dprintk("%s: Set layoutcommit for inode %lu\n",
			__func__, wdata->inode->i_ino);
	}
	if (end_pos > wdata->lseg->pls_end_pos)
		wdata->lseg->pls_end_pos = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
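
/*
 * Typical flow, as suggested by the code in this file: after pNFS
 * WRITEs complete, the layout driver is expected to call
 * pnfs_set_layoutcommit(), which pins the write lseg, records the
 * credential and end offset, and marks the inode dirty; writeback of
 * inode state then reaches pnfs_layoutcommit_inode() below, which
 * consumes that saved state and sends the actual LAYOUTCOMMIT to the
 * MDS.
 */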

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg;
	struct rpc_cred *cred;
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}
	/*
	 * Currently only one (whole file) write lseg which is referenced
	 * in pnfs_set_layoutcommit and will be found.
	 */
	lseg = pnfs_list_write_lseg(inode);

	end_pos = lseg->pls_end_pos;
	cred = lseg->pls_lc_cred;
	lseg->pls_end_pos = 0;
	lseg->pls_lc_cred = NULL;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->lseg = lseg;
	data->cred = cred;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}