NFSv4.2/pnfs: Use GFP_NOIO for layoutstat reporting in the writeback path
fs/nfs/flexfilelayout/flexfilelayout.c
1 /*
2 * Module for pnfs flexfile layout driver.
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 * Tao Peng <bergwolf@primarydata.com>
7 */
9 #include <linux/nfs_fs.h>
10 #include <linux/nfs_page.h>
11 #include <linux/module.h>
13 #include <linux/sunrpc/metrics.h>
15 #include "flexfilelayout.h"
16 #include "../nfs4session.h"
17 #include "../nfs4idmap.h"
18 #include "../internal.h"
19 #include "../delegation.h"
20 #include "../nfs4trace.h"
21 #include "../iostat.h"
22 #include "../nfs.h"
23 #include "../nfs42.h"
25 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
27 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
29 static struct pnfs_layout_hdr *
30 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
32 struct nfs4_flexfile_layout *ffl;
34 ffl = kzalloc(sizeof(*ffl), gfp_flags);
35 if (ffl) {
36 INIT_LIST_HEAD(&ffl->error_list);
37 return &ffl->generic_hdr;
38 } else
39 return NULL;
42 static void
43 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
45 struct nfs4_ff_layout_ds_err *err, *n;
47 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
48 list) {
49 list_del(&err->list);
50 kfree(err);
52 kfree(FF_LAYOUT_FROM_HDR(lo));
55 static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
57 __be32 *p;
59 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
60 if (unlikely(p == NULL))
61 return -ENOBUFS;
62 memcpy(stateid, p, NFS4_STATEID_SIZE);
63 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
64 p[0], p[1], p[2], p[3]);
65 return 0;
68 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
70 __be32 *p;
72 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
73 if (unlikely(!p))
74 return -ENOBUFS;
75 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
76 nfs4_print_deviceid(devid);
77 return 0;
80 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
82 __be32 *p;
84 p = xdr_inline_decode(xdr, 4);
85 if (unlikely(!p))
86 return -ENOBUFS;
87 fh->size = be32_to_cpup(p++);
88 if (fh->size > sizeof(struct nfs_fh)) {
89 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
90 fh->size);
91 return -EOVERFLOW;
93 /* fh.data */
94 p = xdr_inline_decode(xdr, fh->size);
95 if (unlikely(!p))
96 return -ENOBUFS;
97 memcpy(&fh->data, p, fh->size);
98 dprintk("%s: fh len %d\n", __func__, fh->size);
100 return 0;
104 * Currently only stringified uids and gids are accepted.
105 * I.e., kerberos is not supported to the DSes, so no principals.
107 * That means that one common function will suffice, but when
108 * principals are added, this should be split to accommodate
109 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
111 static int
112 decode_name(struct xdr_stream *xdr, u32 *id)
114 __be32 *p;
115 int len;
117 /* opaque_length(4)*/
118 p = xdr_inline_decode(xdr, 4);
119 if (unlikely(!p))
120 return -ENOBUFS;
121 len = be32_to_cpup(p++);
122 if (len < 0)
123 return -EINVAL;
125 dprintk("%s: len %u\n", __func__, len);
127 /* opaque body */
128 p = xdr_inline_decode(xdr, len);
129 if (unlikely(!p))
130 return -ENOBUFS;
132 if (!nfs_map_string_to_numeric((char *)p, len, id))
133 return -EINVAL;
135 return 0;
138 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
140 int i;
142 if (fls->mirror_array) {
143 for (i = 0; i < fls->mirror_array_cnt; i++) {
144 /* normally mirror_ds is freed in
145 * .free_deviceid_node but we still do it here
146 * for .alloc_lseg error path */
147 if (fls->mirror_array[i]) {
148 kfree(fls->mirror_array[i]->fh_versions);
149 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
150 kfree(fls->mirror_array[i]);
153 kfree(fls->mirror_array);
154 fls->mirror_array = NULL;
158 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
160 int ret = 0;
162 dprintk("--> %s\n", __func__);
164 /* FIXME: remove this check when layout segment support is added */
165 if (lgr->range.offset != 0 ||
166 lgr->range.length != NFS4_MAX_UINT64) {
167 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
168 __func__);
169 ret = -EINVAL;
172 dprintk("--> %s returns %d\n", __func__, ret);
173 return ret;
176 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
178 if (fls) {
179 ff_layout_free_mirror_array(fls);
180 kfree(fls);
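/*
 * Order the mirror array by descending efficiency so that the read
 * path (ff_layout_choose_best_ds_for_read) can simply walk the array
 * and use the first mirror with a reachable DS.
 */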
184 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
186 int i, j;
188 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
189 for (j = i + 1; j < fls->mirror_array_cnt; j++)
190 if (fls->mirror_array[i]->efficiency <
191 fls->mirror_array[j]->efficiency)
192 swap(fls->mirror_array[i],
193 fls->mirror_array[j]);
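/*
 * Decode a flexfile layout from the LAYOUTGET reply.  The layout body
 * parsed below is, roughly:
 *
 *   stripe_unit(8), mirror_array_cnt(4),
 *   then per mirror: ds_count(4), deviceid, efficiency(4), stateid,
 *   fh_count(4) followed by fh_count filehandles, then user and group
 *   as opaque strings, and finally an optional flags(4) word.
 */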
197 static struct pnfs_layout_segment *
198 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
199 struct nfs4_layoutget_res *lgr,
200 gfp_t gfp_flags)
202 struct pnfs_layout_segment *ret;
203 struct nfs4_ff_layout_segment *fls = NULL;
204 struct xdr_stream stream;
205 struct xdr_buf buf;
206 struct page *scratch;
207 u64 stripe_unit;
208 u32 mirror_array_cnt;
209 __be32 *p;
210 int i, rc;
212 dprintk("--> %s\n", __func__);
213 scratch = alloc_page(gfp_flags);
214 if (!scratch)
215 return ERR_PTR(-ENOMEM);
217 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
218 lgr->layoutp->len);
219 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
221 /* stripe unit and mirror_array_cnt */
222 rc = -EIO;
223 p = xdr_inline_decode(&stream, 8 + 4);
224 if (!p)
225 goto out_err_free;
227 p = xdr_decode_hyper(p, &stripe_unit);
228 mirror_array_cnt = be32_to_cpup(p++);
229 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
230 stripe_unit, mirror_array_cnt);
232 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
233 mirror_array_cnt == 0)
234 goto out_err_free;
236 rc = -ENOMEM;
237 fls = kzalloc(sizeof(*fls), gfp_flags);
238 if (!fls)
239 goto out_err_free;
241 fls->mirror_array_cnt = mirror_array_cnt;
242 fls->stripe_unit = stripe_unit;
243 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
244 sizeof(fls->mirror_array[0]), gfp_flags);
245 if (fls->mirror_array == NULL)
246 goto out_err_free;
248 for (i = 0; i < fls->mirror_array_cnt; i++) {
249 struct nfs4_deviceid devid;
250 struct nfs4_deviceid_node *idnode;
251 u32 ds_count;
252 u32 fh_count;
253 int j;
255 rc = -EIO;
256 p = xdr_inline_decode(&stream, 4);
257 if (!p)
258 goto out_err_free;
259 ds_count = be32_to_cpup(p);
261 /* FIXME: allow for striping? */
262 if (ds_count != 1)
263 goto out_err_free;
265 fls->mirror_array[i] =
266 kzalloc(sizeof(struct nfs4_ff_layout_mirror),
267 gfp_flags);
268 if (fls->mirror_array[i] == NULL) {
269 rc = -ENOMEM;
270 goto out_err_free;
273 spin_lock_init(&fls->mirror_array[i]->lock);
274 fls->mirror_array[i]->ds_count = ds_count;
275 fls->mirror_array[i]->lseg = &fls->generic_hdr;
277 /* deviceid */
278 rc = decode_deviceid(&stream, &devid);
279 if (rc)
280 goto out_err_free;
282 idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
283 &devid, lh->plh_lc_cred,
284 gfp_flags);
286 * upon success, mirror_ds is allocated by previous
287 * getdeviceinfo, or newly by .alloc_deviceid_node
288 * nfs4_find_get_deviceid failure is indeed getdeviceinfo failure
290 if (idnode)
291 fls->mirror_array[i]->mirror_ds =
292 FF_LAYOUT_MIRROR_DS(idnode);
293 else
294 goto out_err_free;
296 /* efficiency */
297 rc = -EIO;
298 p = xdr_inline_decode(&stream, 4);
299 if (!p)
300 goto out_err_free;
301 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
303 /* stateid */
304 rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
305 if (rc)
306 goto out_err_free;
308 /* fh */
309 p = xdr_inline_decode(&stream, 4);
310 if (!p)
311 goto out_err_free;
312 fh_count = be32_to_cpup(p);
314 fls->mirror_array[i]->fh_versions =
315 kzalloc(fh_count * sizeof(struct nfs_fh),
316 gfp_flags);
317 if (fls->mirror_array[i]->fh_versions == NULL) {
318 rc = -ENOMEM;
319 goto out_err_free;
322 for (j = 0; j < fh_count; j++) {
323 rc = decode_nfs_fh(&stream,
324 &fls->mirror_array[i]->fh_versions[j]);
325 if (rc)
326 goto out_err_free;
329 fls->mirror_array[i]->fh_versions_cnt = fh_count;
331 /* user */
332 rc = decode_name(&stream, &fls->mirror_array[i]->uid);
333 if (rc)
334 goto out_err_free;
336 /* group */
337 rc = decode_name(&stream, &fls->mirror_array[i]->gid);
338 if (rc)
339 goto out_err_free;
341 dprintk("%s: uid %d gid %d\n", __func__,
342 fls->mirror_array[i]->uid,
343 fls->mirror_array[i]->gid);
346 p = xdr_inline_decode(&stream, 4);
347 if (p)
348 fls->flags = be32_to_cpup(p);
350 ff_layout_sort_mirrors(fls);
351 rc = ff_layout_check_layout(lgr);
352 if (rc)
353 goto out_err_free;
355 ret = &fls->generic_hdr;
356 dprintk("<-- %s (success)\n", __func__);
357 out_free_page:
358 __free_page(scratch);
359 return ret;
360 out_err_free:
361 _ff_layout_free_lseg(fls);
362 ret = ERR_PTR(rc);
363 dprintk("<-- %s (%d)\n", __func__, rc);
364 goto out_free_page;
367 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
369 struct pnfs_layout_segment *lseg;
371 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
372 if (lseg->pls_range.iomode == IOMODE_RW)
373 return true;
375 return false;
378 static void
379 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
381 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
382 int i;
384 dprintk("--> %s\n", __func__);
386 for (i = 0; i < fls->mirror_array_cnt; i++) {
387 if (fls->mirror_array[i]) {
388 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
389 fls->mirror_array[i]->mirror_ds = NULL;
390 if (fls->mirror_array[i]->cred) {
391 put_rpccred(fls->mirror_array[i]->cred);
392 fls->mirror_array[i]->cred = NULL;
397 if (lseg->pls_range.iomode == IOMODE_RW) {
398 struct nfs4_flexfile_layout *ffl;
399 struct inode *inode;
401 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
402 inode = ffl->generic_hdr.plh_inode;
403 spin_lock(&inode->i_lock);
404 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
405 ffl->commit_info.nbuckets = 0;
406 kfree(ffl->commit_info.buckets);
407 ffl->commit_info.buckets = NULL;
409 spin_unlock(&inode->i_lock);
411 _ff_layout_free_lseg(fls);
414 /* Return 1 until we support multiple lsegs */
415 static int
416 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
418 return 1;
421 static void
422 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer)
424 /* first IO request? */
425 if (atomic_inc_return(&timer->n_ops) == 1) {
426 timer->start_time = ktime_get();
430 static ktime_t
431 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer)
433 ktime_t start, now;
435 if (atomic_dec_return(&timer->n_ops) < 0)
436 WARN_ON_ONCE(1);
438 now = ktime_get();
439 start = timer->start_time;
440 timer->start_time = now;
441 return ktime_sub(now, start);
444 static ktime_t
445 nfs4_ff_layout_calc_completion_time(struct rpc_task *task)
447 return ktime_sub(ktime_get(), task->tk_start);
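/*
 * Account the start of an I/O against the mirror and decide whether it
 * is time to send a LAYOUTSTATS update: returns true once
 * FF_LAYOUTSTATS_REPORT_INTERVAL has elapsed since the last report.
 * Called with mirror->lock held.
 */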
450 static bool
451 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
452 struct nfs4_ff_layoutstat *layoutstat)
454 static const ktime_t notime = {0};
455 ktime_t now = ktime_get();
457 nfs4_ff_start_busy_timer(&layoutstat->busy_timer);
458 if (ktime_equal(mirror->start_time, notime))
459 mirror->start_time = now;
460 if (ktime_equal(mirror->last_report_time, notime))
461 mirror->last_report_time = now;
462 if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
463 FF_LAYOUTSTATS_REPORT_INTERVAL) {
464 mirror->last_report_time = now;
465 return true;
468 return false;
471 static void
472 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
473 __u64 requested)
475 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
477 iostat->ops_requested++;
478 iostat->bytes_requested += requested;
481 static void
482 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
483 __u64 requested,
484 __u64 completed,
485 ktime_t time_completed)
487 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
488 ktime_t timer;
490 iostat->ops_completed++;
491 iostat->bytes_completed += completed;
492 iostat->bytes_not_delivered += requested - completed;
494 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer);
495 iostat->total_busy_time =
496 ktime_add(iostat->total_busy_time, timer);
497 iostat->aggregate_completion_time =
498 ktime_add(iostat->aggregate_completion_time, time_completed);
501 static void
502 nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror,
503 __u64 requested)
505 bool report;
507 spin_lock(&mirror->lock);
508 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat);
509 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
510 spin_unlock(&mirror->lock);
512 if (report)
513 pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode,
514 GFP_KERNEL);
517 static void
518 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
519 struct nfs4_ff_layout_mirror *mirror,
520 __u64 requested,
521 __u64 completed)
523 spin_lock(&mirror->lock);
524 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
525 requested, completed,
526 nfs4_ff_layout_calc_completion_time(task));
527 spin_unlock(&mirror->lock);
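/*
 * Unlike the read path above, the write path may be entered from the
 * writeback code under memory pressure, so the layoutstat report is
 * sent with GFP_NOIO rather than GFP_KERNEL: the allocation must not
 * kick off further I/O while we are already in the writeback path.
 */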
530 static void
531 nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror,
532 __u64 requested)
534 bool report;
536 spin_lock(&mirror->lock);
537 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat);
538 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
539 spin_unlock(&mirror->lock);
541 if (report)
542 pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode,
543 GFP_NOIO);
546 static void
547 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
548 struct nfs4_ff_layout_mirror *mirror,
549 __u64 requested,
550 __u64 completed,
551 enum nfs3_stable_how committed)
553 if (committed == NFS_UNSTABLE)
554 requested = completed = 0;
556 spin_lock(&mirror->lock);
557 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
558 requested, completed,
559 nfs4_ff_layout_calc_completion_time(task));
560 spin_unlock(&mirror->lock);
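/*
 * Allocate the per-DS commit buckets for this layout: one bucket per
 * mirror (times the lseg count, currently always 1).  The array is
 * allocated outside cinfo->lock and simply discarded if another thread
 * managed to install buckets first.
 */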
563 static int
564 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
565 struct nfs_commit_info *cinfo,
566 gfp_t gfp_flags)
568 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
569 struct pnfs_commit_bucket *buckets;
570 int size;
572 if (cinfo->ds->nbuckets != 0) {
573 /* This assumes there is only one RW lseg per file.
574 * To support multiple lseg per file, we need to
575 * change struct pnfs_commit_bucket to allow dynamic
576 * increasing nbuckets.
578 return 0;
581 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
583 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
584 gfp_flags);
585 if (!buckets)
586 return -ENOMEM;
587 else {
588 int i;
590 spin_lock(cinfo->lock);
591 if (cinfo->ds->nbuckets != 0)
592 kfree(buckets);
593 else {
594 cinfo->ds->buckets = buckets;
595 cinfo->ds->nbuckets = size;
596 for (i = 0; i < size; i++) {
597 INIT_LIST_HEAD(&buckets[i].written);
598 INIT_LIST_HEAD(&buckets[i].committing);
599 /* mark direct verifier as unset */
600 buckets[i].direct_verf.committed =
601 NFS_INVALID_STABLE_HOW;
604 spin_unlock(cinfo->lock);
605 return 0;
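/*
 * Walk the mirror array (already sorted by efficiency) and return the
 * first mirror for which a DS connection can be prepared.
 */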
609 static struct nfs4_pnfs_ds *
610 ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
611 int *best_idx)
613 struct nfs4_ff_layout_segment *fls;
614 struct nfs4_pnfs_ds *ds;
615 int idx;
617 fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
618 /* mirrors are sorted by efficiency */
619 for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
620 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
621 if (ds) {
622 *best_idx = idx;
623 return ds;
627 return NULL;
630 static void
631 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
632 struct nfs_page *req)
634 struct nfs_pgio_mirror *pgm;
635 struct nfs4_ff_layout_mirror *mirror;
636 struct nfs4_pnfs_ds *ds;
637 int ds_idx;
639 /* Use full layout for now */
640 if (!pgio->pg_lseg)
641 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
642 req->wb_context,
644 NFS4_MAX_UINT64,
645 IOMODE_READ,
646 GFP_KERNEL);
647 /* If no lseg, fall back to read through mds */
648 if (pgio->pg_lseg == NULL)
649 goto out_mds;
651 ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
652 if (!ds)
653 goto out_mds;
654 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
656 pgio->pg_mirror_idx = ds_idx;
658 /* read always uses only one mirror - idx 0 for pgio layer */
659 pgm = &pgio->pg_mirrors[0];
660 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
662 return;
663 out_mds:
664 pnfs_put_lseg(pgio->pg_lseg);
665 pgio->pg_lseg = NULL;
666 nfs_pageio_reset_read_mds(pgio);
669 static void
670 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
671 struct nfs_page *req)
673 struct nfs4_ff_layout_mirror *mirror;
674 struct nfs_pgio_mirror *pgm;
675 struct nfs_commit_info cinfo;
676 struct nfs4_pnfs_ds *ds;
677 int i;
678 int status;
680 if (!pgio->pg_lseg)
681 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
682 req->wb_context,
684 NFS4_MAX_UINT64,
685 IOMODE_RW,
686 GFP_NOFS);
687 /* If no lseg, fall back to write through mds */
688 if (pgio->pg_lseg == NULL)
689 goto out_mds;
691 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
692 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
693 if (status < 0)
694 goto out_mds;
696 /* Use a direct mapping of ds_idx to pgio mirror_idx */
697 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
698 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
699 goto out_mds;
701 for (i = 0; i < pgio->pg_mirror_count; i++) {
702 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
703 if (!ds)
704 goto out_mds;
705 pgm = &pgio->pg_mirrors[i];
706 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
707 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
710 return;
712 out_mds:
713 pnfs_put_lseg(pgio->pg_lseg);
714 pgio->pg_lseg = NULL;
715 nfs_pageio_reset_write_mds(pgio);
718 static unsigned int
719 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
720 struct nfs_page *req)
722 if (!pgio->pg_lseg)
723 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
724 req->wb_context,
726 NFS4_MAX_UINT64,
727 IOMODE_RW,
728 GFP_NOFS);
729 if (pgio->pg_lseg)
730 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
732 /* no lseg means that pnfs is not in use, so no mirroring here */
733 pnfs_put_lseg(pgio->pg_lseg);
734 pgio->pg_lseg = NULL;
735 nfs_pageio_reset_write_mds(pgio);
736 return 1;
739 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
740 .pg_init = ff_layout_pg_init_read,
741 .pg_test = pnfs_generic_pg_test,
742 .pg_doio = pnfs_generic_pg_readpages,
743 .pg_cleanup = pnfs_generic_pg_cleanup,
746 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
747 .pg_init = ff_layout_pg_init_write,
748 .pg_test = pnfs_generic_pg_test,
749 .pg_doio = pnfs_generic_pg_writepages,
750 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
751 .pg_cleanup = pnfs_generic_pg_cleanup,
754 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
756 struct rpc_task *task = &hdr->task;
758 pnfs_layoutcommit_inode(hdr->inode, false);
760 if (retry_pnfs) {
761 dprintk("%s Reset task %5u for i/o through pNFS "
762 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
763 hdr->task.tk_pid,
764 hdr->inode->i_sb->s_id,
765 (unsigned long long)NFS_FILEID(hdr->inode),
766 hdr->args.count,
767 (unsigned long long)hdr->args.offset);
769 if (!hdr->dreq) {
770 struct nfs_open_context *ctx;
772 ctx = nfs_list_entry(hdr->pages.next)->wb_context;
773 set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
774 hdr->completion_ops->error_cleanup(&hdr->pages);
775 } else {
776 nfs_direct_set_resched_writes(hdr->dreq);
777 /* fake unstable write to let common nfs resend pages */
778 hdr->verf.committed = NFS_UNSTABLE;
779 hdr->good_bytes = hdr->args.count;
781 return;
784 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
785 dprintk("%s Reset task %5u for i/o through MDS "
786 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
787 hdr->task.tk_pid,
788 hdr->inode->i_sb->s_id,
789 (unsigned long long)NFS_FILEID(hdr->inode),
790 hdr->args.count,
791 (unsigned long long)hdr->args.offset);
793 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
797 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
799 struct rpc_task *task = &hdr->task;
801 pnfs_layoutcommit_inode(hdr->inode, false);
803 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
804 dprintk("%s Reset task %5u for i/o through MDS "
805 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
806 hdr->task.tk_pid,
807 hdr->inode->i_sb->s_id,
808 (unsigned long long)NFS_FILEID(hdr->inode),
809 hdr->args.count,
810 (unsigned long long)hdr->args.offset);
812 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
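/*
 * NFSv4 error handling for DS I/O.  MDS stateid errors trigger stateid
 * recovery, session errors trigger session recovery on the DS client,
 * layout-invalidating errors destroy the layout so a fresh one is
 * fetched, and connection errors mark the deviceid unavailable before
 * retrying through another mirror (-NFS4ERR_RESET_TO_PNFS) or through
 * the MDS (-NFS4ERR_RESET_TO_MDS).
 */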
816 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
817 struct nfs4_state *state,
818 struct nfs_client *clp,
819 struct pnfs_layout_segment *lseg,
820 int idx)
822 struct pnfs_layout_hdr *lo = lseg->pls_layout;
823 struct inode *inode = lo->plh_inode;
824 struct nfs_server *mds_server = NFS_SERVER(inode);
826 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
827 struct nfs_client *mds_client = mds_server->nfs_client;
828 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
830 if (task->tk_status >= 0)
831 return 0;
833 switch (task->tk_status) {
834 /* MDS state errors */
835 case -NFS4ERR_DELEG_REVOKED:
836 case -NFS4ERR_ADMIN_REVOKED:
837 case -NFS4ERR_BAD_STATEID:
838 if (state == NULL)
839 break;
840 nfs_remove_bad_delegation(state->inode);
841 case -NFS4ERR_OPENMODE:
842 if (state == NULL)
843 break;
844 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
845 goto out_bad_stateid;
846 goto wait_on_recovery;
847 case -NFS4ERR_EXPIRED:
848 if (state != NULL) {
849 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
850 goto out_bad_stateid;
852 nfs4_schedule_lease_recovery(mds_client);
853 goto wait_on_recovery;
854 /* DS session errors */
855 case -NFS4ERR_BADSESSION:
856 case -NFS4ERR_BADSLOT:
857 case -NFS4ERR_BAD_HIGH_SLOT:
858 case -NFS4ERR_DEADSESSION:
859 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
860 case -NFS4ERR_SEQ_FALSE_RETRY:
861 case -NFS4ERR_SEQ_MISORDERED:
862 dprintk("%s ERROR %d, Reset session. Exchangeid "
863 "flags 0x%x\n", __func__, task->tk_status,
864 clp->cl_exchange_flags);
865 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
866 break;
867 case -NFS4ERR_DELAY:
868 case -NFS4ERR_GRACE:
869 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
870 break;
871 case -NFS4ERR_RETRY_UNCACHED_REP:
872 break;
873 /* Invalidate Layout errors */
874 case -NFS4ERR_PNFS_NO_LAYOUT:
875 case -ESTALE: /* mapped NFS4ERR_STALE */
876 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
877 case -EISDIR: /* mapped NFS4ERR_ISDIR */
878 case -NFS4ERR_FHEXPIRED:
879 case -NFS4ERR_WRONG_TYPE:
880 dprintk("%s Invalid layout error %d\n", __func__,
881 task->tk_status);
883 * Destroy layout so new i/o will get a new layout.
884 * Layout will not be destroyed until all current lseg
885 * references are put. Mark layout as invalid to resend failed
886 * i/o and all i/o waiting on the slot table to the MDS until
887 * layout is destroyed and a new valid layout is obtained.
889 pnfs_destroy_layout(NFS_I(inode));
890 rpc_wake_up(&tbl->slot_tbl_waitq);
891 goto reset;
892 /* RPC connection errors */
893 case -ECONNREFUSED:
894 case -EHOSTDOWN:
895 case -EHOSTUNREACH:
896 case -ENETUNREACH:
897 case -EIO:
898 case -ETIMEDOUT:
899 case -EPIPE:
900 dprintk("%s DS connection error %d\n", __func__,
901 task->tk_status);
902 nfs4_mark_deviceid_unavailable(devid);
903 rpc_wake_up(&tbl->slot_tbl_waitq);
904 /* fall through */
905 default:
906 if (ff_layout_has_available_ds(lseg))
907 return -NFS4ERR_RESET_TO_PNFS;
908 reset:
909 dprintk("%s Retry through MDS. Error %d\n", __func__,
910 task->tk_status);
911 return -NFS4ERR_RESET_TO_MDS;
913 out:
914 task->tk_status = 0;
915 return -EAGAIN;
916 out_bad_stateid:
917 task->tk_status = -EIO;
918 return 0;
919 wait_on_recovery:
920 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
921 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
922 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
923 goto out;
926 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
927 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
928 struct pnfs_layout_segment *lseg,
929 int idx)
931 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
933 if (task->tk_status >= 0)
934 return 0;
936 if (task->tk_status != -EJUKEBOX) {
937 dprintk("%s DS connection error %d\n", __func__,
938 task->tk_status);
939 nfs4_mark_deviceid_unavailable(devid);
940 if (ff_layout_has_available_ds(lseg))
941 return -NFS4ERR_RESET_TO_PNFS;
942 else
943 return -NFS4ERR_RESET_TO_MDS;
946 if (task->tk_status == -EJUKEBOX)
947 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
948 task->tk_status = 0;
949 rpc_restart_call(task);
950 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
951 return -EAGAIN;
954 static int ff_layout_async_handle_error(struct rpc_task *task,
955 struct nfs4_state *state,
956 struct nfs_client *clp,
957 struct pnfs_layout_segment *lseg,
958 int idx)
960 int vers = clp->cl_nfs_mod->rpc_vers->number;
962 switch (vers) {
963 case 3:
964 return ff_layout_async_handle_error_v3(task, lseg, idx);
965 case 4:
966 return ff_layout_async_handle_error_v4(task, state, clp,
967 lseg, idx);
968 default:
969 /* should never happen */
970 WARN_ON_ONCE(1);
971 return 0;
975 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
976 int idx, u64 offset, u64 length,
977 u32 status, int opnum)
979 struct nfs4_ff_layout_mirror *mirror;
980 int err;
982 mirror = FF_LAYOUT_COMP(lseg, idx);
983 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
984 mirror, offset, length, status, opnum,
985 GFP_NOIO);
986 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
989 /* NFS_PROTO call done callback routines */
991 static int ff_layout_read_done_cb(struct rpc_task *task,
992 struct nfs_pgio_header *hdr)
994 struct inode *inode;
995 int err;
997 trace_nfs4_pnfs_read(hdr, task->tk_status);
998 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
999 hdr->res.op_status = NFS4ERR_NXIO;
1000 if (task->tk_status < 0 && hdr->res.op_status)
1001 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1002 hdr->args.offset, hdr->args.count,
1003 hdr->res.op_status, OP_READ);
1004 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1005 hdr->ds_clp, hdr->lseg,
1006 hdr->pgio_mirror_idx);
1008 switch (err) {
1009 case -NFS4ERR_RESET_TO_PNFS:
1010 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1011 &hdr->lseg->pls_layout->plh_flags);
1012 pnfs_read_resend_pnfs(hdr);
1013 return task->tk_status;
1014 case -NFS4ERR_RESET_TO_MDS:
1015 inode = hdr->lseg->pls_layout->plh_inode;
1016 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1017 ff_layout_reset_read(hdr);
1018 return task->tk_status;
1019 case -EAGAIN:
1020 rpc_restart_call_prepare(task);
1021 return -EAGAIN;
1024 return 0;
1027 static bool
1028 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1030 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1034 * We reference the rpc_cred of the first WRITE that triggers the need for
1035 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1036 * rfc5661 is not clear about which credential should be used.
1038 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1039 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1040 * we always send layoutcommit after DS writes.
1042 static void
1043 ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
1045 if (!ff_layout_need_layoutcommit(hdr->lseg))
1046 return;
1048 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
1049 hdr->mds_offset + hdr->res.count);
1050 dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
1051 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
1054 static bool
1055 ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
1057 /* No mirroring for now */
1058 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
1060 return ff_layout_test_devid_unavailable(node);
1063 static int ff_layout_read_prepare_common(struct rpc_task *task,
1064 struct nfs_pgio_header *hdr)
1066 nfs4_ff_layout_stat_io_start_read(
1067 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1068 hdr->args.count);
1070 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1071 rpc_exit(task, -EIO);
1072 return -EIO;
1074 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1075 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
1076 if (ff_layout_has_available_ds(hdr->lseg))
1077 pnfs_read_resend_pnfs(hdr);
1078 else
1079 ff_layout_reset_read(hdr);
1080 rpc_exit(task, 0);
1081 return -EAGAIN;
1083 hdr->pgio_done_cb = ff_layout_read_done_cb;
1085 return 0;
1089 * Call ops for the async read/write cases
1090 * In the case of dense layouts, the offset needs to be reset to its
1091 * original value.
1093 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1095 struct nfs_pgio_header *hdr = data;
1097 if (ff_layout_read_prepare_common(task, hdr))
1098 return;
1100 rpc_call_start(task);
1103 static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
1104 struct nfs4_sequence_args *args,
1105 struct nfs4_sequence_res *res,
1106 struct rpc_task *task)
1108 if (ds_clp->cl_session)
1109 return nfs41_setup_sequence(ds_clp->cl_session,
1110 args,
1111 res,
1112 task);
1113 return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
1114 args,
1115 res,
1116 task);
1119 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1121 struct nfs_pgio_header *hdr = data;
1123 if (ff_layout_setup_sequence(hdr->ds_clp,
1124 &hdr->args.seq_args,
1125 &hdr->res.seq_res,
1126 task))
1127 return;
1129 if (ff_layout_read_prepare_common(task, hdr))
1130 return;
1132 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1133 hdr->args.lock_context, FMODE_READ) == -EIO)
1134 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1137 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1139 struct nfs_pgio_header *hdr = data;
1141 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1143 nfs4_ff_layout_stat_io_end_read(task,
1144 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1145 hdr->args.count, hdr->res.count);
1147 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1148 task->tk_status == 0) {
1149 nfs4_sequence_done(task, &hdr->res.seq_res);
1150 return;
1153 /* Note this may cause RPC to be resent */
1154 hdr->mds_ops->rpc_call_done(task, hdr);
1157 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1159 struct nfs_pgio_header *hdr = data;
1161 rpc_count_iostats_metrics(task,
1162 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1165 static int ff_layout_write_done_cb(struct rpc_task *task,
1166 struct nfs_pgio_header *hdr)
1168 struct inode *inode;
1169 int err;
1171 trace_nfs4_pnfs_write(hdr, task->tk_status);
1172 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
1173 hdr->res.op_status = NFS4ERR_NXIO;
1174 if (task->tk_status < 0 && hdr->res.op_status)
1175 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1176 hdr->args.offset, hdr->args.count,
1177 hdr->res.op_status, OP_WRITE);
1178 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1179 hdr->ds_clp, hdr->lseg,
1180 hdr->pgio_mirror_idx);
1182 switch (err) {
1183 case -NFS4ERR_RESET_TO_PNFS:
1184 case -NFS4ERR_RESET_TO_MDS:
1185 inode = hdr->lseg->pls_layout->plh_inode;
1186 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1187 if (err == -NFS4ERR_RESET_TO_PNFS) {
1188 pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
1189 ff_layout_reset_write(hdr, true);
1190 } else {
1191 pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
1192 ff_layout_reset_write(hdr, false);
1194 return task->tk_status;
1195 case -EAGAIN:
1196 rpc_restart_call_prepare(task);
1197 return -EAGAIN;
1200 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1201 hdr->res.verf->committed == NFS_DATA_SYNC)
1202 ff_layout_set_layoutcommit(hdr);
1204 return 0;
1207 static int ff_layout_commit_done_cb(struct rpc_task *task,
1208 struct nfs_commit_data *data)
1210 struct inode *inode;
1211 int err;
1213 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1214 if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
1215 data->res.op_status = NFS4ERR_NXIO;
1216 if (task->tk_status < 0 && data->res.op_status)
1217 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1218 data->args.offset, data->args.count,
1219 data->res.op_status, OP_COMMIT);
1220 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1221 data->lseg, data->ds_commit_index);
1223 switch (err) {
1224 case -NFS4ERR_RESET_TO_PNFS:
1225 case -NFS4ERR_RESET_TO_MDS:
1226 inode = data->lseg->pls_layout->plh_inode;
1227 pnfs_error_mark_layout_for_return(inode, data->lseg);
1228 if (err == -NFS4ERR_RESET_TO_PNFS)
1229 pnfs_set_retry_layoutget(data->lseg->pls_layout);
1230 else
1231 pnfs_clear_retry_layoutget(data->lseg->pls_layout);
1232 pnfs_generic_prepare_to_resend_writes(data);
1233 return -EAGAIN;
1234 case -EAGAIN:
1235 rpc_restart_call_prepare(task);
1236 return -EAGAIN;
1239 if (data->verf.committed == NFS_UNSTABLE
1240 && ff_layout_need_layoutcommit(data->lseg))
1241 pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
1243 return 0;
1246 static int ff_layout_write_prepare_common(struct rpc_task *task,
1247 struct nfs_pgio_header *hdr)
1249 nfs4_ff_layout_stat_io_start_write(
1250 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1251 hdr->args.count);
1253 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1254 rpc_exit(task, -EIO);
1255 return -EIO;
1258 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1259 bool retry_pnfs;
1261 retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1262 dprintk("%s task %u reset io to %s\n", __func__,
1263 task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1264 ff_layout_reset_write(hdr, retry_pnfs);
1265 rpc_exit(task, 0);
1266 return -EAGAIN;
1269 return 0;
1272 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1274 struct nfs_pgio_header *hdr = data;
1276 if (ff_layout_write_prepare_common(task, hdr))
1277 return;
1279 rpc_call_start(task);
1282 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1284 struct nfs_pgio_header *hdr = data;
1286 if (ff_layout_setup_sequence(hdr->ds_clp,
1287 &hdr->args.seq_args,
1288 &hdr->res.seq_res,
1289 task))
1290 return;
1292 if (ff_layout_write_prepare_common(task, hdr))
1293 return;
1295 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1296 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1297 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1300 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1302 struct nfs_pgio_header *hdr = data;
1304 nfs4_ff_layout_stat_io_end_write(task,
1305 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1306 hdr->args.count, hdr->res.count,
1307 hdr->res.verf->committed);
1309 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1310 task->tk_status == 0) {
1311 nfs4_sequence_done(task, &hdr->res.seq_res);
1312 return;
1315 /* Note this may cause RPC to be resent */
1316 hdr->mds_ops->rpc_call_done(task, hdr);
1319 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1321 struct nfs_pgio_header *hdr = data;
1323 rpc_count_iostats_metrics(task,
1324 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1327 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1328 struct nfs_commit_data *cdata)
1330 nfs4_ff_layout_stat_io_start_write(
1331 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1335 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1337 ff_layout_commit_prepare_common(task, data);
1338 rpc_call_start(task);
1341 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1343 struct nfs_commit_data *wdata = data;
1345 if (ff_layout_setup_sequence(wdata->ds_clp,
1346 &wdata->args.seq_args,
1347 &wdata->res.seq_res,
1348 task))
1349 return;
1350 ff_layout_commit_prepare_common(task, data);
1353 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1355 struct nfs_commit_data *cdata = data;
1356 struct nfs_page *req;
1357 __u64 count = 0;
1359 if (task->tk_status == 0) {
1360 list_for_each_entry(req, &cdata->pages, wb_list)
1361 count += req->wb_bytes;
1364 nfs4_ff_layout_stat_io_end_write(task,
1365 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1366 count, count, NFS_FILE_SYNC);
1368 pnfs_generic_write_commit_done(task, data);
1371 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1373 struct nfs_commit_data *cdata = data;
1375 rpc_count_iostats_metrics(task,
1376 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1379 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1380 .rpc_call_prepare = ff_layout_read_prepare_v3,
1381 .rpc_call_done = ff_layout_read_call_done,
1382 .rpc_count_stats = ff_layout_read_count_stats,
1383 .rpc_release = pnfs_generic_rw_release,
1386 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1387 .rpc_call_prepare = ff_layout_read_prepare_v4,
1388 .rpc_call_done = ff_layout_read_call_done,
1389 .rpc_count_stats = ff_layout_read_count_stats,
1390 .rpc_release = pnfs_generic_rw_release,
1393 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1394 .rpc_call_prepare = ff_layout_write_prepare_v3,
1395 .rpc_call_done = ff_layout_write_call_done,
1396 .rpc_count_stats = ff_layout_write_count_stats,
1397 .rpc_release = pnfs_generic_rw_release,
1400 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1401 .rpc_call_prepare = ff_layout_write_prepare_v4,
1402 .rpc_call_done = ff_layout_write_call_done,
1403 .rpc_count_stats = ff_layout_write_count_stats,
1404 .rpc_release = pnfs_generic_rw_release,
1407 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1408 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1409 .rpc_call_done = ff_layout_commit_done,
1410 .rpc_count_stats = ff_layout_commit_count_stats,
1411 .rpc_release = pnfs_generic_commit_release,
1414 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1415 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1416 .rpc_call_done = ff_layout_commit_done,
1417 .rpc_count_stats = ff_layout_commit_count_stats,
1418 .rpc_release = pnfs_generic_commit_release,
1421 static enum pnfs_try_status
1422 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1424 struct pnfs_layout_segment *lseg = hdr->lseg;
1425 struct nfs4_pnfs_ds *ds;
1426 struct rpc_clnt *ds_clnt;
1427 struct rpc_cred *ds_cred;
1428 loff_t offset = hdr->args.offset;
1429 u32 idx = hdr->pgio_mirror_idx;
1430 int vers;
1431 struct nfs_fh *fh;
1433 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1434 __func__, hdr->inode->i_ino,
1435 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1437 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1438 if (!ds)
1439 goto out_failed;
1441 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1442 hdr->inode);
1443 if (IS_ERR(ds_clnt))
1444 goto out_failed;
1446 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1447 if (IS_ERR(ds_cred))
1448 goto out_failed;
1450 vers = nfs4_ff_layout_ds_version(lseg, idx);
1452 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1453 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1455 atomic_inc(&ds->ds_clp->cl_count);
1456 hdr->ds_clp = ds->ds_clp;
1457 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1458 if (fh)
1459 hdr->args.fh = fh;
1461 * Note that if we ever decide to split across DSes,
1462 * then we may need to handle dense-like offsets.
1464 hdr->args.offset = offset;
1465 hdr->mds_offset = offset;
1467 /* Perform an asynchronous read to ds */
1468 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1469 vers == 3 ? &ff_layout_read_call_ops_v3 :
1470 &ff_layout_read_call_ops_v4,
1471 0, RPC_TASK_SOFTCONN);
1473 return PNFS_ATTEMPTED;
1475 out_failed:
1476 if (ff_layout_has_available_ds(lseg))
1477 return PNFS_TRY_AGAIN;
1478 return PNFS_NOT_ATTEMPTED;
1481 /* Perform async writes. */
1482 static enum pnfs_try_status
1483 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1485 struct pnfs_layout_segment *lseg = hdr->lseg;
1486 struct nfs4_pnfs_ds *ds;
1487 struct rpc_clnt *ds_clnt;
1488 struct rpc_cred *ds_cred;
1489 loff_t offset = hdr->args.offset;
1490 int vers;
1491 struct nfs_fh *fh;
1492 int idx = hdr->pgio_mirror_idx;
1494 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1495 if (!ds)
1496 return PNFS_NOT_ATTEMPTED;
1498 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1499 hdr->inode);
1500 if (IS_ERR(ds_clnt))
1501 return PNFS_NOT_ATTEMPTED;
1503 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1504 if (IS_ERR(ds_cred))
1505 return PNFS_NOT_ATTEMPTED;
1507 vers = nfs4_ff_layout_ds_version(lseg, idx);
1509 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1510 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1511 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1512 vers);
1514 hdr->pgio_done_cb = ff_layout_write_done_cb;
1515 atomic_inc(&ds->ds_clp->cl_count);
1516 hdr->ds_clp = ds->ds_clp;
1517 hdr->ds_commit_idx = idx;
1518 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1519 if (fh)
1520 hdr->args.fh = fh;
1523 * Note that if we ever decide to split across DSes,
1524 * then we may need to handle dense-like offsets.
1526 hdr->args.offset = offset;
1528 /* Perform an asynchronous write */
1529 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1530 vers == 3 ? &ff_layout_write_call_ops_v3 :
1531 &ff_layout_write_call_ops_v4,
1532 sync, RPC_TASK_SOFTCONN);
1533 return PNFS_ATTEMPTED;
1536 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1538 return i;
1541 static struct nfs_fh *
1542 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1544 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1546 /* FIXME: Assume that there is only one NFS version available
1547 * for the DS.
1549 return &flseg->mirror_array[i]->fh_versions[0];
1552 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1554 struct pnfs_layout_segment *lseg = data->lseg;
1555 struct nfs4_pnfs_ds *ds;
1556 struct rpc_clnt *ds_clnt;
1557 struct rpc_cred *ds_cred;
1558 u32 idx;
1559 int vers;
1560 struct nfs_fh *fh;
1562 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1563 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1564 if (!ds)
1565 goto out_err;
1567 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1568 data->inode);
1569 if (IS_ERR(ds_clnt))
1570 goto out_err;
1572 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1573 if (IS_ERR(ds_cred))
1574 goto out_err;
1576 vers = nfs4_ff_layout_ds_version(lseg, idx);
1578 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1579 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1580 vers);
1581 data->commit_done_cb = ff_layout_commit_done_cb;
1582 data->cred = ds_cred;
1583 atomic_inc(&ds->ds_clp->cl_count);
1584 data->ds_clp = ds->ds_clp;
1585 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1586 if (fh)
1587 data->args.fh = fh;
1589 return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1590 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1591 &ff_layout_commit_call_ops_v4,
1592 how, RPC_TASK_SOFTCONN);
1593 out_err:
1594 pnfs_generic_prepare_to_resend_writes(data);
1595 pnfs_generic_commit_release(data);
1596 return -EAGAIN;
1599 static int
1600 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1601 int how, struct nfs_commit_info *cinfo)
1603 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1604 ff_layout_initiate_commit);
1607 static struct pnfs_ds_commit_info *
1608 ff_layout_get_ds_info(struct inode *inode)
1610 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1612 if (layout == NULL)
1613 return NULL;
1615 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1618 static void
1619 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1621 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1622 id_node));
1625 static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1626 struct xdr_stream *xdr,
1627 const struct nfs4_layoutreturn_args *args)
1629 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1630 __be32 *start;
1631 int count = 0, ret = 0;
1633 start = xdr_reserve_space(xdr, 4);
1634 if (unlikely(!start))
1635 return -E2BIG;
1637 /* This assumes we always return _ALL_ layouts */
1638 spin_lock(&hdr->plh_inode->i_lock);
1639 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1640 spin_unlock(&hdr->plh_inode->i_lock);
1642 *start = cpu_to_be32(count);
1644 return ret;
1647 /* report nothing for now */
1648 static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1649 struct xdr_stream *xdr,
1650 const struct nfs4_layoutreturn_args *args)
1652 __be32 *p;
1654 p = xdr_reserve_space(xdr, 4);
1655 if (likely(p))
1656 *p = cpu_to_be32(0);
1659 static struct nfs4_deviceid_node *
1660 ff_layout_alloc_deviceid_node(struct nfs_server *server,
1661 struct pnfs_device *pdev, gfp_t gfp_flags)
1663 struct nfs4_ff_layout_ds *dsaddr;
1665 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1666 if (!dsaddr)
1667 return NULL;
1668 return &dsaddr->id_node;
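/*
 * Encode the opaque lrf_body for LAYOUTRETURN: a placeholder length
 * word is reserved up front, the ioerr and iostats arrays are encoded,
 * and the length is backfilled from the final xdr position.
 */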
1671 static void
1672 ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1673 struct xdr_stream *xdr,
1674 const struct nfs4_layoutreturn_args *args)
1676 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1677 __be32 *start;
1679 dprintk("%s: Begin\n", __func__);
1680 start = xdr_reserve_space(xdr, 4);
1681 BUG_ON(!start);
1683 if (ff_layout_encode_ioerr(flo, xdr, args))
1684 goto out;
1686 ff_layout_encode_iostats(flo, xdr, args);
1687 out:
1688 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1689 dprintk("%s: Return\n", __func__);
1692 static int
1693 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
1695 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
1697 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
1700 static size_t
1701 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
1702 const int buflen)
1704 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
1705 const struct in6_addr *addr = &sin6->sin6_addr;
1708 * RFC 4291, Section 2.2.2
1710 * Shorthanded ANY address
1712 if (ipv6_addr_any(addr))
1713 return snprintf(buf, buflen, "::");
1716 * RFC 4291, Section 2.2.2
1718 * Shorthanded loopback address
1720 if (ipv6_addr_loopback(addr))
1721 return snprintf(buf, buflen, "::1");
1724 * RFC 4291, Section 2.2.3
1726 * Special presentation address format for mapped v4
1727 * addresses.
1729 if (ipv6_addr_v4mapped(addr))
1730 return snprintf(buf, buflen, "::ffff:%pI4",
1731 &addr->s6_addr32[3]);
1734 * RFC 4291, Section 2.2.1
1736 return snprintf(buf, buflen, "%pI6c", addr);
1739 /* Derived from rpc_sockaddr2uaddr */
1740 static void
1741 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
1743 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
1744 char portbuf[RPCBIND_MAXUADDRPLEN];
1745 char addrbuf[RPCBIND_MAXUADDRLEN];
1746 char *netid;
1747 unsigned short port;
1748 int len, netid_len;
1749 __be32 *p;
1751 switch (sap->sa_family) {
1752 case AF_INET:
1753 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
1754 return;
1755 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
1756 netid = "tcp";
1757 netid_len = 3;
1758 break;
1759 case AF_INET6:
1760 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
1761 return;
1762 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
1763 netid = "tcp6";
1764 netid_len = 4;
1765 break;
1766 default:
1767 /* we only support tcp and tcp6 */
1768 WARN_ON_ONCE(1);
1769 return;
1772 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
1773 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
1775 p = xdr_reserve_space(xdr, 4 + netid_len);
1776 xdr_encode_opaque(p, netid, netid_len);
1778 p = xdr_reserve_space(xdr, 4 + len);
1779 xdr_encode_opaque(p, addrbuf, len);
1782 static void
1783 ff_layout_encode_nfstime(struct xdr_stream *xdr,
1784 ktime_t t)
1786 struct timespec64 ts;
1787 __be32 *p;
1789 p = xdr_reserve_space(xdr, 12);
1790 ts = ktime_to_timespec64(t);
1791 p = xdr_encode_hyper(p, ts.tv_sec);
1792 *p++ = cpu_to_be32(ts.tv_nsec);
1795 static void
1796 ff_layout_encode_io_latency(struct xdr_stream *xdr,
1797 struct nfs4_ff_io_stat *stat)
1799 __be32 *p;
1801 p = xdr_reserve_space(xdr, 5 * 8);
1802 p = xdr_encode_hyper(p, stat->ops_requested);
1803 p = xdr_encode_hyper(p, stat->bytes_requested);
1804 p = xdr_encode_hyper(p, stat->ops_completed);
1805 p = xdr_encode_hyper(p, stat->bytes_completed);
1806 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
1807 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
1808 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
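/*
 * Encode one per-mirror layoutstats entry: the DS netaddr, the
 * filehandle used for the I/O, the read and write latency stats
 * (sampled under mirror->lock), the duration since the mirror was
 * first used, and a trailing boolean that is always encoded as false
 * here.  As with layoutreturn, the opaque length is backfilled at the
 * end.
 */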
1811 static void
1812 ff_layout_encode_layoutstats(struct xdr_stream *xdr,
1813 struct nfs42_layoutstat_args *args,
1814 struct nfs42_layoutstat_devinfo *devinfo)
1816 struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
1817 struct nfs4_pnfs_ds_addr *da;
1818 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
1819 struct nfs_fh *fh = &mirror->fh_versions[0];
1820 __be32 *p, *start;
1822 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
1823 dprintk("%s: DS %s: encoding address %s\n",
1824 __func__, ds->ds_remotestr, da->da_remotestr);
1825 /* layoutupdate length */
1826 start = xdr_reserve_space(xdr, 4);
1827 /* netaddr4 */
1828 ff_layout_encode_netaddr(xdr, da);
1829 /* nfs_fh4 */
1830 p = xdr_reserve_space(xdr, 4 + fh->size);
1831 xdr_encode_opaque(p, fh->data, fh->size);
1832 /* ff_io_latency4 read */
1833 spin_lock(&mirror->lock);
1834 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
1835 /* ff_io_latency4 write */
1836 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
1837 spin_unlock(&mirror->lock);
1838 /* nfstime4 */
1839 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
1840 /* bool */
1841 p = xdr_reserve_space(xdr, 4);
1842 *p = cpu_to_be32(false);
1844 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1847 static bool
1848 ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
1849 struct pnfs_layout_segment *pls,
1850 int *dev_count, int dev_limit)
1852 struct nfs4_ff_layout_mirror *mirror;
1853 struct nfs4_deviceid_node *dev;
1854 struct nfs42_layoutstat_devinfo *devinfo;
1855 int i;
1857 for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
1858 if (*dev_count >= dev_limit)
1859 break;
1860 mirror = FF_LAYOUT_COMP(pls, i);
1861 if (!mirror || !mirror->mirror_ds)
1862 continue;
1863 dev = FF_LAYOUT_DEVID_NODE(pls, i);
1864 devinfo = &args->devinfo[*dev_count];
1865 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
1866 devinfo->offset = pls->pls_range.offset;
1867 devinfo->length = pls->pls_range.length;
1868 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
1869 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
1870 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
1871 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
1872 devinfo->layout_type = LAYOUT_FLEX_FILES;
1873 devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
1874 devinfo->layout_private = mirror;
1875 /* lseg refcount put in cleanup_layoutstats */
1876 pnfs_get_lseg(pls);
1878 ++(*dev_count);
1881 return *dev_count < dev_limit;
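/*
 * Build the devinfo array for a LAYOUTSTATS call: count the mirrors
 * under the inode lock, allocate at most PNFS_LAYOUTSTATS_MAXDEV
 * entries, then fill them in under the lock, taking an lseg reference
 * for each entry that is dropped in ff_layout_cleanup_layoutstats().
 */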
1884 static int
1885 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
1887 struct pnfs_layout_segment *pls;
1888 int dev_count = 0;
1890 spin_lock(&args->inode->i_lock);
1891 list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
1892 dev_count += FF_LAYOUT_MIRROR_COUNT(pls);
1894 spin_unlock(&args->inode->i_lock);
1895 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
1896 if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
1897 dprintk("%s: truncating devinfo to limit (%d:%d)\n",
1898 __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
1899 dev_count = PNFS_LAYOUTSTATS_MAXDEV;
1901 args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL);
1902 if (!args->devinfo)
1903 return -ENOMEM;
1905 dev_count = 0;
1906 spin_lock(&args->inode->i_lock);
1907 list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
1908 if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count,
1909 PNFS_LAYOUTSTATS_MAXDEV)) {
1910 break;
1913 spin_unlock(&args->inode->i_lock);
1914 args->num_dev = dev_count;
1916 return 0;
1919 static void
1920 ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
1922 struct nfs4_ff_layout_mirror *mirror;
1923 int i;
1925 for (i = 0; i < data->args.num_dev; i++) {
1926 mirror = data->args.devinfo[i].layout_private;
1927 data->args.devinfo[i].layout_private = NULL;
1928 pnfs_put_lseg(mirror->lseg);
1932 static struct pnfs_layoutdriver_type flexfilelayout_type = {
1933 .id = LAYOUT_FLEX_FILES,
1934 .name = "LAYOUT_FLEX_FILES",
1935 .owner = THIS_MODULE,
1936 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
1937 .free_layout_hdr = ff_layout_free_layout_hdr,
1938 .alloc_lseg = ff_layout_alloc_lseg,
1939 .free_lseg = ff_layout_free_lseg,
1940 .pg_read_ops = &ff_layout_pg_read_ops,
1941 .pg_write_ops = &ff_layout_pg_write_ops,
1942 .get_ds_info = ff_layout_get_ds_info,
1943 .free_deviceid_node = ff_layout_free_deviceid_node,
1944 .mark_request_commit = pnfs_layout_mark_request_commit,
1945 .clear_request_commit = pnfs_generic_clear_request_commit,
1946 .scan_commit_lists = pnfs_generic_scan_commit_lists,
1947 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
1948 .commit_pagelist = ff_layout_commit_pagelist,
1949 .read_pagelist = ff_layout_read_pagelist,
1950 .write_pagelist = ff_layout_write_pagelist,
1951 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
1952 .encode_layoutreturn = ff_layout_encode_layoutreturn,
1953 .sync = pnfs_nfs_generic_sync,
1954 .prepare_layoutstats = ff_layout_prepare_layoutstats,
1955 .cleanup_layoutstats = ff_layout_cleanup_layoutstats,
1958 static int __init nfs4flexfilelayout_init(void)
1960 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
1961 __func__);
1962 return pnfs_register_layoutdriver(&flexfilelayout_type);
1965 static void __exit nfs4flexfilelayout_exit(void)
1967 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
1968 __func__);
1969 pnfs_unregister_layoutdriver(&flexfilelayout_type);
1972 MODULE_ALIAS("nfs-layouttype4-4");
1974 MODULE_LICENSE("GPL");
1975 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
1977 module_init(nfs4flexfilelayout_init);
1978 module_exit(nfs4flexfilelayout_exit);