/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
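
/* Free a preallocated backchannel rpc_rqst and the rpcrdma_req
 * behind it: the req is unlinked from the transport's
 * rb_reqslock-protected list of requests before its registered
 * buffers are released and the rqst itself is freed.
 */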
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(&r_xprt->rx_ia, req);

	kfree(rqst);
}
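
/* Preallocate one backchannel rpc_rqst: create an rpcrdma_req, mark
 * it as a backchannel resource, and attach two registered buffers.
 * The first (rl_rdmabuf) holds the RPC/RDMA header of the eventual
 * reply; the second (rl_sendbuf) is sized to the sum of the inline
 * thresholds so the reply payload always fits inline.
 */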
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	struct xdr_buf *buf;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

	size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

	size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;
	req->rl_sendbuf = rb;
	/* so that rpcr_to_rdmar works when receiving a request */
	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;

	buf = &rqst->rq_snd_buf;
	buf->head[0].iov_base = rqst->rq_buffer;
	buf->head[0].iov_len = 0;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = size;

	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of rep's. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_rep *rep;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC: %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

		rpcrdma_recv_buffer_put(rep);
	}

	return rc;
}
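
/* Backward direction calls arrive in the reply handler's tasklet
 * context, where sleeping allocations are not allowed. All rqsts,
 * reqs, and receive buffers are therefore set up in advance, before
 * the first backchannel call arrives.
 */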

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst) {
			pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
			       __func__);
			goto out_free;
		}
		dprintk("RPC: %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}
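
/* The "rdma-bc" svc transport class used below is registered by the
 * svcrdma module, which is why xprt_rdma_bc_setup() above requests
 * that module when the backchannel is configured.
 */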

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}
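
/* A backward direction reply is always sent inline: the marshaling
 * below builds a two-element send vector, a fixed-size RPC/RDMA
 * header (with all three chunk lists empty) followed by the RPC
 * reply message itself in the preallocated send buffer.
 */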

/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;
	size_t rpclen;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
		cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	rpclen = rqst->rq_svec[0].iov_len;

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
		__func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
	pr_info("RPC: %s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
	pr_info("RPC: %s: RPC: %*ph\n",
		__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
#endif

	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
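
/* Note the lock juggling in xprt_rdma_bc_destroy() below: bc_pa_lock
 * is dropped and reacquired around each rpcrdma_bc_free_rqst() call,
 * since freeing a req takes rb_reqslock and may sleep while releasing
 * registered buffers.
 */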

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 * Be quick about it.
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
	pr_info("RPC: %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

	/* Sanity check:
	 * Need at least enough bytes for RPC/RDMA header, as code
	 * here references the header fields by array offset. Also,
	 * backward calls are always inline, so ensure there
	 * are some bytes beyond the RPC/RDMA header.
	 */
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
		goto out_short;
	p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
	size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC: %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = headerp->rm_xid;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it can be reposted after the server is done
	 * parsing it but just before sending the backward
	 * direction reply.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC: %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");

	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		xprt_disconnect_done(xprt);
	else
		pr_warn("RPC: %s: reposting rep %p\n",
			__func__, rep);
}