/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	memcpy(stateid, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	/* read in fh len */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}
/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}
static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			if (fls->mirror_array[i]) {
				kfree(fls->mirror_array[i]->fh_versions);
				nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
				kfree(fls->mirror_array[i]);
			}
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}
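
/*
 * ff_layout_alloc_lseg() below decodes the opaque layout body returned
 * by LAYOUTGET. As decoded here it is, roughly (see the flex files
 * layout specification for the authoritative XDR):
 *
 *	stripe_unit(8) | mirror_array_cnt(4)
 *	per mirror:	ds_count(4) | deviceid | efficiency(4) |
 *			stateid | fh_count(4) | fh_count * nfs_fh |
 *			uid | gid
 *	flags(4)
 */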
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		u32 ds_count;
		u32 fh_count;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] =
			kzalloc(sizeof(struct nfs4_ff_layout_mirror),
				gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		spin_lock_init(&fls->mirror_array[i]->lock);
		fls->mirror_array[i]->ds_count = ds_count;
		fls->mirror_array[i]->lseg = &fls->generic_hdr;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * upon success, mirror_ds is allocated by previous
		 * getdeviceinfo, or newly by .alloc_deviceid_node
		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo failure
		 */
		if (idnode)
			fls->mirror_array[i]->mirror_ds =
				FF_LAYOUT_MIRROR_DS(idnode);
		else
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
		if (rc)
			goto out_err_free;

		/* group */
		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
		if (rc)
			goto out_err_free;

		dprintk("%s: uid %d gid %d\n", __func__,
			fls->mirror_array[i]->uid,
			fls->mirror_array[i]->gid);
	}

	p = xdr_inline_decode(&stream, 4);
	if (p)
		fls->flags = be32_to_cpup(p);

	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}
static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	int i;

	dprintk("--> %s\n", __func__);

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		if (fls->mirror_array[i]) {
			nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
			fls->mirror_array[i]->mirror_ds = NULL;
			if (fls->mirror_array[i]->cred) {
				put_rpccred(fls->mirror_array[i]->cred);
				fls->mirror_array[i]->cred = NULL;
			}
		}
	}

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}
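
/*
 * Busy-time accounting for layoutstats: n_ops counts in-flight
 * operations per timer. The clock starts when a mirror goes from idle
 * to busy (first op), and each completion below returns the busy time
 * accumulated since the previous sample, restarting the clock.
 */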
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = ktime_get();
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer)
{
	ktime_t start, now;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	now = ktime_get();
	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static ktime_t
nfs4_ff_layout_calc_completion_time(struct rpc_task *task)
{
	return ktime_sub(ktime_get(), task->tk_start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat)
{
	static const ktime_t notime = {0};
	ktime_t now = ktime_get();

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (ktime_equal(mirror->last_report_time, notime))
		mirror->last_report_time = now;
	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
			FF_LAYOUTSTATS_REPORT_INTERVAL) {
		mirror->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time, time_completed);
}
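
/*
 * All read_stat/write_stat updates are serialized by mirror->lock: the
 * callers below take it around the start_io/update_requested and
 * update_completed pairs so requested/completed byte counts stay
 * consistent for the layoutstats encoder.
 */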
static void
nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror,
		__u64 requested)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode,
				       GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			nfs4_ff_layout_calc_completion_time(task));
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror,
		__u64 requested)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode,
				       GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed,
			nfs4_ff_layout_calc_completion_time(task));
	spin_unlock(&mirror->lock);
}

static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size, i;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;

	spin_lock(cinfo->lock);
	if (cinfo->ds->nbuckets != 0)
		kfree(buckets);
	else {
		cinfo->ds->buckets = buckets;
		cinfo->ds->nbuckets = size;
		for (i = 0; i < size; i++) {
			INIT_LIST_HEAD(&buckets[i].written);
			INIT_LIST_HEAD(&buckets[i].committing);
			/* mark direct verifier as unset */
			buckets[i].direct_verf.committed =
				NFS_INVALID_STABLE_HOW;
		}
	}
	spin_unlock(cinfo->lock);
	return 0;
}
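
/*
 * Read DS selection: ff_layout_alloc_lseg() sorted the mirror array by
 * decreasing efficiency, so the helper below simply walks the array and
 * picks the first mirror whose DS connection can be prepared.
 */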
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls;
	struct nfs4_pnfs_ds *ds;
	int idx;

	fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
	/* mirrors are sorted by efficiency */
	for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

	/* Use full layout for now */
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_READ,
						   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
	if (!ds)
		goto out_mds;
	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds)
			goto out_mds;
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};
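
/*
 * Error recovery helpers: a failed DS I/O is either retried through
 * pNFS (another mirror) or resent through the MDS. These helpers
 * rewrite the nfs_pgio_header so the generic NFS code resends the
 * pages on the chosen path.
 */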
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake unstable write to let common nfs resend pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	if (task->tk_status != -EJUKEBOX) {
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
		else
			return -NFS4ERR_RESET_TO_MDS;
	}

	if (task->tk_status == -EJUKEBOX)
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */

static int ff_layout_read_done_cb(struct rpc_task *task,
				  struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
		hdr->res.op_status = NFS4ERR_NXIO;
	if (task->tk_status < 0 && hdr->res.op_status)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			&hdr->lseg->pls_layout->plh_flags);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	return 0;
}
static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	if (!ff_layout_need_layoutcommit(hdr->lseg))
		return;

	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
			      hdr->mds_offset + hdr->res.count);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
static bool
ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_read(
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		if (ff_layout_has_available_ds(hdr->lseg))
			pnfs_read_resend_pnfs(hdr);
		else
			ff_layout_reset_read(hdr);
		rpc_exit(task, 0);
		return -EAGAIN;
	}
	hdr->pgio_done_cb = ff_layout_read_done_cb;

	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					    args, res, task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				    args, res, task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				   struct nfs_pgio_header *hdr)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
		hdr->res.op_status = NFS4ERR_NXIO;
	if (task->tk_status < 0 && hdr->res.op_status)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = hdr->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, hdr->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS) {
			pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, true);
		} else {
			pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
			ff_layout_reset_write(hdr, false);
		}
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		ff_layout_set_layoutcommit(hdr);

	return 0;
}
static int ff_layout_commit_done_cb(struct rpc_task *task,
				    struct nfs_commit_data *data)
{
	struct inode *inode;
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
		data->res.op_status = NFS4ERR_NXIO;
	if (task->tk_status < 0 && data->res.op_status)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		inode = data->lseg->pls_layout->plh_inode;
		pnfs_error_mark_layout_for_return(inode, data->lseg);
		if (err == -NFS4ERR_RESET_TO_PNFS)
			pnfs_set_retry_layoutget(data->lseg->pls_layout);
		else
			pnfs_clear_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (data->verf.committed == NFS_UNSTABLE
	    && ff_layout_need_layoutcommit(data->lseg))
		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_write(
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		bool retry_pnfs;

		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
		dprintk("%s task %u reset io to %s\n", __func__,
			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
		ff_layout_reset_write(hdr, retry_pnfs);
		rpc_exit(task, 0);
		return -EAGAIN;
	}

	return 0;
}
static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
					    struct nfs_commit_data *cdata)
{
	nfs4_ff_layout_stat_io_start_write(
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				     &wdata->args.seq_args,
				     &wdata->res.seq_res,
				     task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;
	struct nfs_page *req;
	__u64 count = 0;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);

	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};
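
/*
 * DS I/O setup: the paths below choose the rpc_call_ops table matching
 * the DS protocol version (NFSv3 vs NFSv4.x) and issue the RPC with
 * RPC_TASK_SOFTCONN, so a DS that cannot be reached fails fast and the
 * error handlers above can redirect the I/O instead of retrying the
 * connection indefinitely.
 */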
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);

	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_has_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}
/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (IS_ERR(ds_cred))
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}
static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}
static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assumes we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}
/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

static void
ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);

	if (ff_layout_encode_ioerr(flo, xdr, args))
		goto out;

	ff_layout_encode_iostats(flo, xdr, args);
out:
	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}
static bool
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_segment *pls,
			       int *dev_count, int dev_limit)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i;

	for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
		if (*dev_count >= dev_limit)
			break;
		mirror = FF_LAYOUT_COMP(pls, i);
		if (!mirror || !mirror->mirror_ds)
			continue;
		dev = FF_LAYOUT_DEVID_NODE(pls, i);
		devinfo = &args->devinfo[*dev_count];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = pls->pls_range.offset;
		devinfo->length = pls->pls_range.length;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;
		/* lseg refcount put in cleanup_layoutstats */
		pnfs_get_lseg(pls);

		++(*dev_count);
	}

	return *dev_count < dev_limit;
}

static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct pnfs_layout_segment *pls;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
		dev_count += FF_LAYOUT_MIRROR_COUNT(pls);
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL);
	if (!args->devinfo)
		return -ENOMEM;

	dev_count = 0;
	spin_lock(&args->inode->i_lock);
	list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
		if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count,
						    PNFS_LAYOUTSTATS_MAXDEV)) {
			break;
		}
	}
	spin_unlock(&args->inode->i_lock);
	args->num_dev = dev_count;

	return 0;
}
static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		pnfs_put_lseg(mirror->lseg);
	}
}

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.encode_layoutreturn	= ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);