// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_PNFS
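
/*
 * A struct nfs4_layout tracks one layout segment granted to a client:
 * its place on the owning stateid's ls_layouts list, a backpointer to
 * that stateid, and the (offset, length, iomode) segment itself.
 */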
struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lease_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32
devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}
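
/*
 * Allocate a device ID mapping for the export behind @fhp, or reuse an
 * existing mapping with the same fsid. This lets layouts carry a small
 * sequence number instead of the raw fsid.
 */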
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;
out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}
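
/*
 * Fill in a deviceid: the export's device ID map index plus the
 * generation supplied by the layout driver.
 */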
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}
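
/*
 * Advertise the layout types this export supports. Flex files only
 * needs NFSEXP_PNFS; block and SCSI layouts also require the right
 * export_operations (and, for SCSI, persistent-reservation support)
 * from the underlying filesystem and block device.
 */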
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev &&
	    sb->s_bdev->bd_disk->fops->pr_ops &&
	    sb->s_bdev->bd_disk->fops->get_unique_id)
		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}
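
/*
 * Detach the backing nfsd_file from a layout stateid, removing the
 * FL_LAYOUT lease (unless the layout driver disables recalls) and
 * dropping the file reference.
 */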
void nfsd4_close_layout(struct nfs4_layout_stateid *ls)
{
	struct nfsd_file *fl;

	spin_lock(&ls->ls_stid.sc_file->fi_lock);
	fl = ls->ls_file;
	ls->ls_file = NULL;
	spin_unlock(&ls->ls_stid.sc_file->fi_lock);

	if (fl) {
		if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
			kernel_setlease(fl->nf_file, F_UNLCK, NULL,
					(void **)&ls);
		nfsd_file_put(fl);
	}
}

static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	nfsd4_close_layout(ls);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}
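
/*
 * Install an FL_LAYOUT read lease on the backing file. Breaking this
 * lease is what triggers a CB_LAYOUTRECALL on conflicting access.
 */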
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lease *fl;
	int status;

	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
		return 0;

	fl = locks_alloc_lease();
	if (!fl)
		return -ENOMEM;
	locks_init_lease(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->c.flc_flags = FL_LAYOUT;
	fl->c.flc_type = F_RDLCK;
	fl->c.flc_owner = ls;
	fl->c.flc_pid = current->tgid;
	fl->c.flc_file = ls->ls_file->nf_file;

	status = kernel_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL);
	if (status) {
		locks_free_lease(fl);
		return status;
	}
	return 0;
}
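
/*
 * Create a layout stateid from an open, lock, or delegation stateid,
 * take the layout lease, and link the new stateid onto the per-client
 * and per-file lists.
 */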
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
					nfsd4_free_layout_stateid);
	if (!stp)
		return NULL;

	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == SC_TYPE_DELEG)
		ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		nfsd_file_put(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = SC_TYPE_LAYOUT;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}
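
/*
 * Look up (or, if @create is set, create) the layout stateid for a
 * layout operation. On success the stateid is returned in @lsp with
 * ls_mutex held; the caller must unlock it and put the reference.
 */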
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned short typemask = SC_TYPE_LAYOUT;
	__be32 status;

	if (create)
		typemask |= (SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, 0, &stid,
				      net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != SC_TYPE_LAYOUT) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}
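
/*
 * Issue a CB_LAYOUTRECALL for a stateid that still has layouts
 * outstanding; ls_recalled ensures the recall is only sent once.
 */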
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

	refcount_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}
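
/*
 * Segment helpers: layout_end() returns the first offset past a
 * segment, saturating to NFS4_MAX_UINT64 on overflow, and
 * layout_update_len() recomputes a segment's length for a new end.
 */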
static u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;

	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;

	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}
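
/*
 * Recall every other layout stateid on this file. Returns
 * nfserr_recallconflict if any recall was issued so that the client
 * retries later. Caller must hold fi_lock.
 */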
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}
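
/*
 * Record a granted layout segment on its stateid, merging it into an
 * existing segment where possible. The conflict check and merge scan
 * run twice because the allocation must happen with both locks dropped.
 */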
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	refcount_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}
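
/*
 * Trim one layout against a returned segment: free it outright, shrink
 * it from either end, or leave it untouched on an unsupported split.
 */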
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}
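
/*
 * Handle LAYOUTRETURN for one file: drop every overlapping segment and
 * report via lrp->lrs_present whether any layout state remains.
 */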
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = true;
	} else {
		trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		ls->ls_stid.sc_status |= SC_STATUS_CLOSED;
		lrp->lrs_present = false;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = false;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}
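
/*
 * Fence a client that failed to respond to a layout recall by running
 * /sbin/nfsd-recall-failed with the client address and the super block
 * identifier as arguments.
 */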
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"  Fencing..\n", addr_str);

	argv[0] = (char *)nfsd_recall_failed;
	argv[1] = addr_str;
	argv[2] = file->nf_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
				    UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}
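
/*
 * CB_LAYOUTRECALL completion: poll a client that answers with DELAY
 * for up to two lease periods, treat NOMATCHING_LAYOUT as success, and
 * fence anything else.
 */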
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;
	struct nfsd_file *fl;

	trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
				(u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		fallthrough;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);

		fl = nfsd_file_get(ls->ls_file);
		if (!fl)
			return 1;
		ops = nfsd4_layout_ops[ls->ls_layout_type];
		if (ops->fence_client)
			ops->fence_client(ls, fl);
		else
			nfsd4_cb_layout_fail(ls, fl);
		nfsd_file_put(fl);
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
	.opcode		= OP_CB_LAYOUTRECALL,
};

static bool
nfsd4_layout_lm_break(struct file_lease *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->c.flc_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = KMEM_CACHE(nfs4_layout, 0);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = KMEM_CACHE(nfs4_layout_stateid, 0);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}