/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
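
/*
 * Illustrative sketch (not part of the original file): a transport module
 * typically makes itself known to the RPC client by filling in a struct
 * xprt_class and registering it from its module init/exit hooks, roughly
 * as below. The names example_xprt_class, example_setup and
 * XPRT_TRANSPORT_EXAMPLE are hypothetical placeholders.
 *
 *	static struct xprt_class example_xprt_class = {
 *		.list		= LIST_HEAD_INIT(example_xprt_class.list),
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ident		= XPRT_TRANSPORT_EXAMPLE,
 *		.setup		= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_xprt_class);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_xprt_class);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */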
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
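
/*
 * Illustrative note: a transport that wants to be demand-loaded via the
 * request_module("xprt%s", ...) call above is expected to declare a
 * matching module alias, e.g. MODULE_ALIAS("xprtexample") for a transport
 * registered under the name "example" (the alias shown here is a
 * hypothetical placeholder).
 */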
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}
static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}
static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
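
/*
 * Worked example (illustrative, assuming the usual RPC_CWNDSCALE of 256,
 * so one request slot corresponds to 256 congestion units): with
 * cwnd == 512 (two slots) and a successful reply while the window is
 * full, the additive step is (256 * 256 + 256) / 512 == 128, i.e. the
 * window opens by half a slot per round trip; a timeout instead halves
 * cwnd to 256, and it is never allowed to drop below one slot
 * (RPC_CWNDSCALE) or to grow beyond RPC_MAXCWND(xprt).
 */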
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
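
/*
 * Worked example (illustrative parameter values, not taken from this
 * file): for a non-exponential timeout with to_initval = 5s,
 * to_increment = 5s, to_retries = 5 and to_maxval = 30s,
 * xprt_reset_majortimeo() places the major timeout 5 + 5 * 5 = 30 seconds
 * (capped at to_maxval) into the future; each minor timeout then grows
 * rq_timeout by 5s up to 30s, and once the major timeout passes,
 * xprt_adjust_timeout() resets rq_timeout to to_initval, restarts the
 * RTT estimator and reports -ETIMEDOUT to the caller.
 */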
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	} else {
		xprt_release_write(xprt, task);
	}
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
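
/*
 * Illustrative sketch (not part of the original file): a datagram
 * transport's receive handler typically pairs xprt_lookup_rqst() with
 * xprt_complete_rqst() while holding the transport lock, roughly as
 * below. The function name example_handle_reply and the elided step that
 * copies the reply data into rovr->rq_private_buf are hypothetical.
 *
 *	static void example_handle_reply(struct rpc_xprt *xprt,
 *					 __be32 xid, int copied)
 *	{
 *		struct rpc_rqst *rovr;
 *
 *		spin_lock_bh(&xprt->transport_lock);
 *		rovr = xprt_lookup_rqst(xprt, xid);
 *		if (rovr != NULL) {
 *			...copy the reply into rovr->rq_private_buf here...
 *			xprt_complete_rqst(rovr->rq_task, copied);
 *		}
 *		spin_unlock_bh(&xprt->transport_lock);
 *	}
 */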
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
}
/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt->ops->alloc_slot(xprt, task);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);

	xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/* Exclude transport connect/disconnect handlers */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);