// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

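/*
 * CB_GETATTR: the server asks for the attributes the client is caching
 * under a write delegation on the file named by args->fh (change
 * attribute and size, plus the cached timestamps).
 */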
__be32 nfs4_callback_getattr(void *argp, void *resp,
                             struct cb_process_state *cps)
{
        struct cb_getattrargs *args = argp;
        struct cb_getattrres *res = resp;
        struct nfs_delegation *delegation;
        struct nfs_inode *nfsi;
        struct inode *inode;

        res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        res->bitmap[0] = res->bitmap[1] = 0;
        res->status = htonl(NFS4ERR_BADHANDLE);

        dprintk_rcu("NFS: GETATTR callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (IS_ERR(inode)) {
                if (inode == ERR_PTR(-EAGAIN))
                        res->status = htonl(NFS4ERR_DELAY);
                trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
                                -ntohl(res->status));
                goto out;
        }
        nfsi = NFS_I(inode);
        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
                goto out_iput;
        res->size = i_size_read(inode);
        res->change_attr = delegation->change_attr;
        if (nfs_have_writebacks(inode))
                res->change_attr++;
        res->ctime = timespec64_to_timespec(inode->i_ctime);
        res->mtime = timespec64_to_timespec(inode->i_mtime);
        res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
                args->bitmap[0];
        res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
                args->bitmap[1];
        res->status = 0;
out_iput:
        rcu_read_unlock();
        trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
        nfs_iput_and_deactive(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
        return res->status;
}

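/*
 * CB_RECALL: the server recalls a delegation.  Look up the inode by
 * filehandle and hand the actual return off to an asynchronous worker
 * via nfs_async_inode_return_delegation().
 */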
__be32 nfs4_callback_recall(void *argp, void *resp,
                            struct cb_process_state *cps)
{
        struct cb_recallargs *args = argp;
        struct inode *inode;
        __be32 res;

        res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
                goto out;

        dprintk_rcu("NFS: RECALL callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        res = htonl(NFS4ERR_BADHANDLE);
        inode = nfs_delegation_find_inode(cps->clp, &args->fh);
        if (IS_ERR(inode)) {
                if (inode == ERR_PTR(-EAGAIN))
                        res = htonl(NFS4ERR_DELAY);
                trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
                                &args->stateid, -ntohl(res));
                goto out;
        }
        /* Set up a helper thread to actually return the delegation */
        switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
        case 0:
                res = 0;
                break;
        case -ENOENT:
                res = htonl(NFS4ERR_BAD_STATEID);
                break;
        default:
                res = htonl(NFS4ERR_RESOURCE);
        }
        trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
                        &args->stateid, -ntohl(res));
        nfs_iput_and_deactive(inode);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
        return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
                const nfs4_stateid *stateid)
{
        struct nfs_server *server;
        struct inode *inode;
        struct pnfs_layout_hdr *lo;

        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry(lo, &server->layouts, plh_layouts) {
                        if (stateid != NULL &&
                            !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
                                continue;
                        inode = igrab(lo->plh_inode);
                        if (!inode)
                                return ERR_PTR(-EAGAIN);
                        if (!nfs_sb_active(inode->i_sb)) {
                                rcu_read_unlock();
                                spin_unlock(&clp->cl_lock);
                                iput(inode);
                                spin_lock(&clp->cl_lock);
                                rcu_read_lock();
                                return ERR_PTR(-EAGAIN);
                        }
                        return inode;
                }
        }

        return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
                const struct nfs_fh *fh)
{
        struct nfs_server *server;
        struct nfs_inode *nfsi;
        struct inode *inode;
        struct pnfs_layout_hdr *lo;

        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry(lo, &server->layouts, plh_layouts) {
                        nfsi = NFS_I(lo->plh_inode);
                        if (nfs_compare_fh(fh, &nfsi->fh))
                                continue;
                        if (nfsi->layout != lo)
                                continue;
                        inode = igrab(lo->plh_inode);
                        if (!inode)
                                return ERR_PTR(-EAGAIN);
                        if (!nfs_sb_active(inode->i_sb)) {
                                rcu_read_unlock();
                                spin_unlock(&clp->cl_lock);
                                iput(inode);
                                spin_lock(&clp->cl_lock);
                                rcu_read_lock();
                                return ERR_PTR(-EAGAIN);
                        }
                        return inode;
                }
        }

        return ERR_PTR(-ENOENT);
}

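/*
 * Look the inode up by layout stateid first; fall back to a filehandle
 * search only if no layout matched the stateid.
 */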
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
                const struct nfs_fh *fh,
                const nfs4_stateid *stateid)
{
        struct inode *inode;

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        inode = nfs_layout_find_inode_by_stateid(clp, stateid);
        if (inode == ERR_PTR(-ENOENT))
                inode = nfs_layout_find_inode_by_fh(clp, fh);
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
                                        const nfs4_stateid *new)
{
        u32 oldseq, newseq;

        /* Is the stateid not initialised? */
        if (!pnfs_layout_is_valid(lo))
                return NFS4ERR_NOMATCHING_LAYOUT;

        /* Mismatched stateid? */
        if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
                return NFS4ERR_BAD_STATEID;

        newseq = be32_to_cpu(new->seqid);
        /* Are we already in a layout recall situation? */
        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
            lo->plh_return_seq != 0) {
                if (newseq < lo->plh_return_seq)
                        return NFS4ERR_OLD_STATEID;
                if (newseq > lo->plh_return_seq)
                        return NFS4ERR_DELAY;
                goto out;
        }

        /* Check that the stateid matches what we think it should be. */
        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        if (newseq > oldseq + 1)
                return NFS4ERR_DELAY;
        if (newseq <= oldseq)
                return NFS4ERR_OLD_STATEID;
out:
        return NFS_OK;
}

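/*
 * Handle a CB_LAYOUTRECALL of type RETURN_FILE: commit any outstanding
 * layout changes, validate the recall stateid against the layout
 * header, and mark the matching layout segments for return.
 */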
static u32 initiate_file_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        struct inode *ino;
        struct pnfs_layout_hdr *lo;
        u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
        LIST_HEAD(free_me_list);

        ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
        if (IS_ERR(ino)) {
                if (ino == ERR_PTR(-EAGAIN))
                        rv = NFS4ERR_DELAY;
                goto out_noput;
        }

        pnfs_layoutcommit_inode(ino, false);

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                goto out;
        }
        pnfs_get_layout_hdr(lo);
        rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
        if (rv != NFS_OK)
                goto unlock;

        /*
         * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
         */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                rv = NFS4ERR_DELAY;
                goto unlock;
        }

        pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
        switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
                                &args->cbl_range,
                                be32_to_cpu(args->cbl_stateid.seqid))) {
        case 0:
        case -EBUSY:
                /* There are layout segments that need to be returned */
                rv = NFS4_OK;
                break;
        case -ENOENT:
                /* Embrace your forgetfulness! */
                rv = NFS4ERR_NOMATCHING_LAYOUT;

                if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
                        NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
                                &args->cbl_range);
                }
        }
unlock:
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&free_me_list);
        /* Free all lsegs that are attached to commit buckets */
        nfs_commit_inode(ino, 0);
        pnfs_put_layout_hdr(lo);
out:
        nfs_iput_and_deactive(ino);
out_noput:
        trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
                        &args->cbl_stateid, -rv);
        return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
                                  struct cb_layoutrecallargs *args)
{
        int stat;

        if (args->cbl_recall_type == RETURN_FSID)
                stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
        else
                stat = pnfs_destroy_layouts_byclid(clp, true);
        if (stat != 0)
                return NFS4ERR_DELAY;
        return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
                                    struct cb_layoutrecallargs *args)
{
        if (args->cbl_recall_type == RETURN_FILE)
                return initiate_file_draining(clp, args);
        return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
                                  struct cb_process_state *cps)
{
        struct cb_layoutrecallargs *args = argp;
        u32 res = NFS4ERR_OP_NOT_IN_SESSION;

        if (cps->clp)
                res = do_callback_layoutrecall(cps->clp, args);
        return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
        struct cb_layoutrecallargs args;

        /* Pretend we got a CB_LAYOUTRECALL(ALL) */
        memset(&args, 0, sizeof(args));
        args.cbl_recall_type = RETURN_ALL;
        /* FIXME we ignore errors, what should we do? */
        do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
                                  struct cb_process_state *cps)
{
        struct cb_devicenotifyargs *args = argp;
        int i;
        __be32 res = 0;
        struct nfs_client *clp = cps->clp;
        struct nfs_server *server = NULL;

        if (!clp) {
                res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
                goto out;
        }

        for (i = 0; i < args->ndevs; i++) {
                struct cb_devicenotifyitem *dev = &args->devs[i];

                if (!server ||
                    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
                        rcu_read_lock();
                        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                                if (server->pnfs_curr_ld &&
                                    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
                                        rcu_read_unlock();
                                        goto found;
                                }
                        rcu_read_unlock();
                        continue;
                }

        found:
                nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
        }

out:
        kfree(args->devs);
        return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
                const struct cb_sequenceargs * args)
{
        __be32 ret;

        ret = cpu_to_be32(NFS4ERR_BADSLOT);
        if (args->csa_slotid > tbl->server_highest_slotid)
                goto out_err;

        /* Replay */
        if (args->csa_sequenceid == slot->seq_nr) {
                ret = cpu_to_be32(NFS4ERR_DELAY);
                if (nfs4_test_locked_slot(tbl, slot->slot_nr))
                        goto out_err;

                /* Signal process_op to set this error on next op */
                ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
                if (args->csa_cachethis == 0)
                        goto out_err;

                /* Liar! We never allowed you to set csa_cachethis != 0 */
                ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
                goto out_err;
        }

        /* Note: wraparound relies on seq_nr being of type u32 */
        /* Misordered request */
        ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
        if (args->csa_sequenceid != slot->seq_nr + 1)
                goto out_err;

        return cpu_to_be32(NFS4_OK);

out_err:
        trace_nfs4_cb_seqid_err(args, ret);
        return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
                                 uint32_t nrclists,
                                 struct referring_call_list *rclists,
                                 spinlock_t *lock)
{
        int status = 0;
        int i, j;
        struct nfs4_session *session;
        struct nfs4_slot_table *tbl;
        struct referring_call_list *rclist;
        struct referring_call *ref;

        /*
         * XXX When client trunking is implemented, this becomes
         * a session lookup from within the loop
         */
        session = clp->cl_session;
        tbl = &session->fc_slot_table;

        for (i = 0; i < nrclists; i++) {
                rclist = &rclists[i];
                if (memcmp(session->sess_id.data,
                           rclist->rcl_sessionid.data,
                           NFS4_MAX_SESSIONID_LEN) != 0)
                        continue;

                for (j = 0; j < rclist->rcl_nrefcalls; j++) {
                        ref = &rclist->rcl_refcalls[j];
                        spin_unlock(lock);
                        status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
                                        ref->rc_sequenceid, HZ >> 1) < 0;
                        spin_lock(lock);
                        if (status)
                                goto out;
                }
        }

out:
        return status;
}

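/*
 * CB_SEQUENCE: locate the client by session ID, validate the slot and
 * sequence ID on the backchannel slot table, and ask the server to
 * retry later if a referring call is still outstanding on the fore
 * channel.
 */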
__be32 nfs4_callback_sequence(void *argp, void *resp,
                              struct cb_process_state *cps)
{
        struct cb_sequenceargs *args = argp;
        struct cb_sequenceres *res = resp;
        struct nfs4_slot_table *tbl;
        struct nfs4_slot *slot;
        struct nfs_client *clp;
        int i;
        __be32 status = htonl(NFS4ERR_BADSESSION);

        clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
                                         &args->csa_sessionid, cps->minorversion);
        if (clp == NULL)
                goto out;

        if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
                goto out;

        tbl = &clp->cl_session->bc_slot_table;

        /* Set up res before grabbing the spinlock */
        memcpy(&res->csr_sessionid, &args->csa_sessionid,
               sizeof(res->csr_sessionid));
        res->csr_sequenceid = args->csa_sequenceid;
        res->csr_slotid = args->csa_slotid;

        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
                 * in order to reset it.
                 */
                if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                        status = htonl(NFS4ERR_BADSESSION);
                goto out_unlock;
        }

        status = htonl(NFS4ERR_BADSLOT);
        slot = nfs4_lookup_slot(tbl, args->csa_slotid);
        if (IS_ERR(slot))
                goto out_unlock;

        res->csr_highestslotid = tbl->server_highest_slotid;
        res->csr_target_highestslotid = tbl->target_highest_slotid;

        status = validate_seqid(tbl, slot, args);
        if (status)
                goto out_unlock;
        if (!nfs4_try_to_lock_slot(tbl, slot)) {
                status = htonl(NFS4ERR_DELAY);
                goto out_unlock;
        }
        cps->slot = slot;

        /* The ca_maxresponsesize_cached is 0 with no DRC */
        if (args->csa_cachethis != 0) {
                status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
                goto out_unlock;
        }

        /*
         * Check for pending referring calls.  If a match is found, a
         * related callback was received before the response to the original
         * call, so we cannot process it yet.
         */
        if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
                                &tbl->slot_tbl_lock) < 0) {
                status = htonl(NFS4ERR_DELAY);
                goto out_unlock;
        }

        /*
         * RFC5661 20.9.3
         * If CB_SEQUENCE returns an error, then the state of the slot
         * (sequence ID, cached reply) MUST NOT change.
         */
        slot->seq_nr = args->csa_sequenceid;
out_unlock:
        spin_unlock(&tbl->slot_tbl_lock);

out:
        cps->clp = clp; /* put in nfs4_callback_compound */
        for (i = 0; i < args->csa_nrclists; i++)
                kfree(args->csa_rclists[i].rcl_refcalls);
        kfree(args->csa_rclists);

        if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
                cps->drc_status = status;
                status = 0;
        } else
                res->csr_status = status;

        trace_nfs4_cb_sequence(args, res, status);
        return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

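/*
 * CB_RECALL_ANY: the server asks us to give back some recallable
 * objects.  Respond by expiring unused read and/or write delegations
 * and, if requested, returning all pNFS layouts.
 */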
__be32 nfs4_callback_recallany(void *argp, void *resp,
                               struct cb_process_state *cps)
{
        struct cb_recallanyargs *args = argp;
        __be32 status;
        fmode_t flags = 0;

        status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        status = cpu_to_be32(NFS4ERR_INVAL);
        if (!validate_bitmap_values(args->craa_type_mask))
                goto out;

        status = cpu_to_be32(NFS4_OK);
        if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
                flags = FMODE_READ;
        if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
                flags |= FMODE_WRITE;
        if (flags)
                nfs_expire_unused_delegation_types(cps->clp, flags);

        if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
                pnfs_recall_all_layouts(cps->clp);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
                                struct cb_process_state *cps)
{
        struct cb_recallslotargs *args = argp;
        struct nfs4_slot_table *fc_tbl;
        __be32 status;

        status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
        if (!cps->clp) /* set in cb_sequence */
                goto out;

        dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
                args->crsa_target_highest_slotid);

        fc_tbl = &cps->clp->cl_session->fc_slot_table;

        status = htonl(NFS4_OK);

        nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
        nfs41_notify_server(cps->clp);
out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
                                 struct cb_process_state *cps)
{
        struct cb_notify_lock_args *args = argp;

        if (!cps->clp) /* set in cb_sequence */
                return htonl(NFS4ERR_OP_NOT_IN_SESSION);

        dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
                rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

        /* Don't wake anybody if the string looked bogus */
        if (args->cbnl_valid)
                __wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

        return htonl(NFS4_OK);
}

#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2

static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
                              struct cb_offloadargs *args)
{
        cp_state->count = args->wr_count;
        cp_state->error = args->error;
        if (!args->error) {
                cp_state->verf.committed = args->wr_writeverf.committed;
                memcpy(&cp_state->verf.verifier.data[0],
                        &args->wr_writeverf.verifier.data[0],
                        NFS4_VERIFIER_SIZE);
        }
}

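/*
 * CB_OFFLOAD: the server reports completion of an asynchronous COPY.
 * If a matching copy stateid is already waiting on one of our
 * superblocks, copy the results over and wake the waiter; otherwise
 * stash the result on pending_cb_stateids until the COPY reply arrives.
 */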
__be32 nfs4_callback_offload(void *data, void *dummy,
                             struct cb_process_state *cps)
{
        struct cb_offloadargs *args = data;
        struct nfs_server *server;
        struct nfs4_copy_state *copy, *tmp_copy;
        bool found = false;

        copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
        if (!copy)
                return htonl(NFS4ERR_SERVERFAULT);

        spin_lock(&cps->clp->cl_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
                                client_link) {
                list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
                        if (memcmp(args->coa_stateid.other,
                                        tmp_copy->stateid.other,
                                        sizeof(args->coa_stateid.other)))
                                continue;
                        nfs4_copy_cb_args(tmp_copy, args);
                        complete(&tmp_copy->completion);
                        found = true;
                        goto out;
                }
        }
out:
        rcu_read_unlock();
        if (!found) {
                memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
                nfs4_copy_cb_args(copy, args);
                list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
        } else
                kfree(copy);
        spin_unlock(&cps->clp->cl_lock);

        return 0;
}

#endif /* CONFIG_NFS_V4_2 */