// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
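
/*
 * Illustrative call flow (editorial sketch, not part of the original
 * file): callers normally drive this machinery indirectly through the
 * RPC client. EXAMPLE_PROC and example_procedures are hypothetical.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procedures[EXAMPLE_PROC],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int err = rpc_call_sync(clnt, &msg, 0);
 *
 * rpc_call_sync() reserves a slot (xprt_reserve), transmits the request
 * (xprt_transmit) and waits for the reply or a timeout, as described in
 * the comment above.
 */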

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
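
/*
 * Example (editorial sketch, not part of this file): a transport module
 * registers a statically defined xprt_class from its module init hook.
 * The names and field values below are hypothetical.
 *
 *	static struct xprt_class example_transport = {
 *		.list		= LIST_HEAD_INIT(example_transport.list),
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.setup		= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */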

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
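
/*
 * Illustrative sketch (editorial, not part of this file): a datagram
 * transport pairs the congestion-controlled variants in its
 * rpc_xprt_ops, roughly as xprtsock.c does for UDP:
 *
 *	static const struct rpc_xprt_ops xs_udp_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		...
 *	};
 *
 * Stream transports such as TCP plug in xprt_reserve_xprt and
 * xprt_release_xprt instead, since the transport itself provides
 * flow control.
 */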

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
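
/*
 * Worked example (editorial, illustrative only): with RPC_CWNDSCALE of
 * 256 and cwnd = 1024 (four request credits), a successful reply grows
 * the window by (256 * 256 + 512) / 1024 = 64, i.e. by a quarter of one
 * credit, matching the 1/cwnd increase described above, while a timeout
 * halves the window from 1024 to 512.
 */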

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
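
/*
 * Worked example (editorial, hypothetical values): with to_initval of 6
 * seconds, to_exponential set and to_retries = 5, rq_timeout doubles on
 * each minor timeout (6s, 12s, 24s, ...) until rq_majortimeo, derived
 * from to_initval << to_retries and capped at to_maxval, expires; the
 * timeout then resets to to_initval and -ETIMEDOUT is returned.
 */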

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
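
/*
 * Worked example (editorial, hypothetical values): starting from
 * init_to = 3 seconds, each call doubles reestablish_timeout
 * (3s, 6s, 12s, ...) until it is clamped at max_reconnect_timeout,
 * while xprt_reconnect_delay() above converts the result into the
 * remaining wait relative to the last connect attempt.
 */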

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
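
/*
 * Typical receive-path usage (editorial sketch of a transport
 * data_ready handler, not part of this file; copy_reply_data() is a
 * hypothetical helper):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req) {
 *		spin_unlock(&xprt->queue_lock);
 *		return;
 *	}
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copied = copy_reply_data(req);
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */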

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_xprt_sendto(&req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
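
/*
 * Typical usage (editorial sketch, not part of this file): a transport
 * setup routine embeds struct rpc_xprt in its private structure and
 * sizes the slot table from its tunables, e.g. roughly as xprtsock.c
 * does for TCP:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct sock_xprt),
 *			xprt_tcp_slot_table_entries,
 *			xprt_max_tcp_slot_table_entries);
 */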

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);

	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);