/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

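/*
 * nfs4_init_slot_table - set up the bookkeeping for an empty slot table:
 * no slot is in use, the table lock and completion are initialised, and
 * the RPC wait queue that tasks sleep on while waiting for a slot is
 * given the caller-supplied name.
 */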
static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
	init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
	struct nfs4_slot **p;

	if (newsize >= tbl->max_slots)
		return;

	/* Walk to the new end of the singly linked slot list... */
	p = &tbl->slots;
	while (newsize--)
		p = &(*p)->next;

	/* ...and free every slot that follows it. */
	while (*p) {
		struct nfs4_slot *slot = *p;

		*p = slot->next;
		kfree(slot);
		tbl->max_slots--;
	}
}

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
	if (nfs4_slot_tbl_draining(tbl))
		complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * Freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		u32 new_max = find_last_bit(tbl->used_slots, slotid);

		if (new_max < slotid)
			tbl->highest_used_slotid = new_max;
		else {
			tbl->highest_used_slotid = NFS4_NO_SLOT;
			nfs4_slot_tbl_drain_complete(tbl);
		}
	}
	dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
		slotid, tbl->highest_used_slotid);
}

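/*
 * nfs4_new_slot - allocate and initialise a single slot entry for @tbl.
 * Returns NULL if the allocation fails.
 */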
static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot *slot;

	slot = kzalloc(sizeof(*slot), gfp_mask);
	if (slot) {
		slot->table = tbl;
		slot->slot_nr = slotid;
		slot->seq_nr = seq_init;
	}
	return slot;
}

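/*
 * nfs4_find_or_create_slot - walk the slot list looking for @slotid,
 * extending the list with freshly allocated slots until it is found.
 * Returns ERR_PTR(-ENOMEM) if a needed allocation fails.
 */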
static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot **p, *slot;

	p = &tbl->slots;
	for (;;) {
		if (*p == NULL) {
			*p = nfs4_new_slot(tbl, tbl->max_slots,
					seq_init, gfp_mask);
			if (*p == NULL)
				break;
			tbl->max_slots++;
		}
		slot = *p;
		if (slot->slot_nr == slotid)
			return slot;
		p = &slot->next;
	}
	return ERR_PTR(-ENOMEM);
}

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid > tbl->max_slotid)
		goto out;
	ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
	if (IS_ERR(ret))
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret->generation = tbl->generation;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
	return ret;
}

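/*
 * nfs4_grow_slot_table - make sure the table holds at least @max_reqs slots,
 * seeding any newly created slots with sequence number @ivalue.
 */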
static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	if (max_reqs <= tbl->max_slots)
		return 0;
	if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
		return 0;
	return -ENOMEM;
}

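/*
 * nfs4_reset_slot_table - shrink the table to the server-advertised size and
 * reset each remaining slot's sequence number and bookkeeping so the table
 * can be reused from a clean state.
 */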
static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
		u32 server_highest_slotid,
		u32 ivalue)
{
	struct nfs4_slot **p;

	nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
	p = &tbl->slots;
	while (*p) {
		(*p)->seq_nr = ivalue;
		(*p)->interrupted = 0;
		p = &(*p)->next;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	tbl->target_highest_slotid = server_highest_slotid;
	tbl->server_highest_slotid = server_highest_slotid;
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	tbl->max_slotid = server_highest_slotid;
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	int ret;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
		max_reqs, tbl->max_slots);

	if (max_reqs > NFS4_MAX_SLOT_TABLE)
		max_reqs = NFS4_MAX_SLOT_TABLE;

	ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
	if (ret)
		goto out;

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
	spin_unlock(&tbl->slot_tbl_lock);

	dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_release_slot_table(tbl);
	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
		const char *queue)
{
	nfs4_init_slot_table(tbl, queue);
	return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}

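/*
 * nfs41_assign_slot - rpc_wake_up_first() callback: hand @pslot to a task
 * sleeping on the slot table wait queue. Unprivileged tasks are not given
 * a slot while the table is draining.
 */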
static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;

	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		return false;
	slot->generation = tbl->generation;
	args->sa_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_slot = slot;
	res->sr_status_flags = 0;
	res->sr_status = 1;
	return true;
}

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
		return true;
	return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (slot->slot_nr > tbl->max_slotid)
		return false;
	return __nfs41_wake_and_assign_slot(tbl, slot);
}

static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = nfs4_alloc_slot(tbl);

	if (!IS_ERR(slot)) {
		bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
		if (ret)
			return ret;
		/* Nobody was waiting: give the slot straight back */
		nfs4_free_slot(tbl, slot);
	}
	return false;
}

void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	for (;;) {
		if (!nfs41_try_wake_next_slot_table_entry(tbl))
			break;
	}
}

#if defined(CONFIG_NFS_V4_1)

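/*
 * nfs41_set_max_slotid_locked - clamp the highest slotid we will hand out to
 * the smallest of the compile-time limit, the server's current table size and
 * the current target, then wake any tasks that can now be given a slot.
 * Caller must hold tbl->slot_tbl_lock.
 */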
static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	u32 max_slotid;

	max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
	if (max_slotid > tbl->server_highest_slotid)
		max_slotid = tbl->server_highest_slotid;
	if (max_slotid > tbl->target_highest_slotid)
		max_slotid = tbl->target_highest_slotid;
	tbl->max_slotid = max_slotid;
	nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	if (tbl->target_highest_slotid == target_highest_slotid)
		return;
	tbl->target_highest_slotid = target_highest_slotid;
	tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
		u32 highest_slotid)
{
	if (tbl->server_highest_slotid == highest_slotid)
		return;
	if (tbl->highest_used_slotid > highest_slotid)
		return;
	/* Deallocate slots */
	nfs4_shrink_slot_table(tbl, highest_slotid + 1);
	tbl->server_highest_slotid = highest_slotid;
}

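/*
 * nfs41_derivative_target_slotid - compute a smoothed rate of change:
 * roughly half the difference s1 - s2, biased away from zero. For example,
 * (7, 4) yields (3 + 1) >> 1 = 2, and (4, 7) yields (-3 - 1) >> 1 = -2.
 */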
static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
	s1 -= s2;
	if (s1 == 0)
		return 0;
	if (s1 < 0)
		return (s1 - 1) >> 1;
	return (s1 + 1) >> 1;
}

static int nfs41_sign_s32(s32 s1)
{
	if (s1 > 0)
		return 1;
	if (s1 < 0)
		return -1;
	return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
	if (!s1 || !s2)
		return true;
	return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * derivatives and second derivatives
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
		u32 new_target)
{
	s32 d_target, d2_target;
	bool ret = true;

	d_target = nfs41_derivative_target_slotid(new_target,
			tbl->target_highest_slotid);
	d2_target = nfs41_derivative_target_slotid(d_target,
			tbl->d_target_highest_slotid);
	/* Is first derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
		ret = false;
	/* Is second derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
		ret = false;
	tbl->d_target_highest_slotid = d_target;
	tbl->d2_target_highest_slotid = d2_target;
	return ret;
}

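/*
 * nfs41_update_target_slotid - process the slot table size feedback carried
 * in a SEQUENCE reply: ignore outlier target values, adopt the server's new
 * target and (if this slot belongs to the current generation) its highest
 * slotid, then recompute the slotid ceiling callers may use.
 */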
void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot,
		struct nfs4_sequence_res *res)
{
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid))
		nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
	if (tbl->generation == slot->generation)
		nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
	nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

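/*
 * nfs4_release_session_slot_tables - free the slots of both the fore
 * channel and back channel tables of a session.
 */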
static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
	nfs4_release_slot_table(&session->fc_slot_table);
	nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_release_session_slot_tables(ses);
	return status;
}

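/*
 * nfs4_alloc_session - allocate a session, initialise its fore and back
 * channel slot table wait queues and mark it as still initialising.
 * Returns NULL on allocation failure.
 */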
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
	nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
	nfs4_shutdown_slot_table(&session->fc_slot_table);
	nfs4_shutdown_slot_table(&session->bc_slot_table);
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	struct rpc_cred *cred;

	cred = nfs4_get_clid_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	if (cred)
		put_rpccred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_session_slot_tables(session);
	kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
	if (!nfs4_has_session(clp))
		return 0;

	clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
	return nfs41_check_session_ready(clp);
}

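/*
 * nfs4_init_ds_session - ready a pNFS data server session: on first use,
 * inherit the MDS lease time instead of probing it, then verify that the
 * session is ready and that the server actually claims the DS role.
 */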
int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif	/* defined(CONFIG_NFS_V4_1) */