/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 *      Redistribution and use in source and binary forms, with or without
 *      modification, are permitted provided that the following conditions
 *      are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#ifndef SVC_RDMA_H
#define SVC_RDMA_H

#include <linux/llist.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
#include <linux/sunrpc/rdma_rn.h>

#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* Default and maximum inline threshold sizes */
enum {
	RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1,
	RPCRDMA_DEF_INLINE_THRESH = 4096,
	RPCRDMA_MAX_INLINE_THRESH = 65536
};

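/*
 * Worked value (an editorial note, assuming RPCRDMA_V1_DEF_INLINE_SIZE
 * is 1024 as defined in rpc_rdma.h):
 *
 *	RPCRDMA_PULLUP_THRESH = 1024 >> 1 = 512 bytes
 *
 * i.e. Send payloads below roughly half the version 1 default inline
 * size are small enough to be copied ("pulled up") into a single
 * buffer instead of being sent from multiple SGEs.
 */
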
/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
extern struct workqueue_struct *svcrdma_wq;

extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
extern struct percpu_counter svcrdma_stat_sq_starve;
extern struct percpu_counter svcrdma_stat_write;

struct svcxprt_rdma {
	struct svc_xprt		sc_xprt;	/* SVC transport structure */
	struct rdma_cm_id	*sc_cm_id;	/* RDMA connection id */
	struct list_head	sc_accept_q;	/* Conn. waiting accept */
	struct rpcrdma_notification sc_rn;	/* removal notification */
	int			sc_ord;		/* RDMA read limit */
	int			sc_max_send_sges;
	bool			sc_snd_w_inv;	/* OK to use Send With Invalidate */

	atomic_t		sc_sq_avail;	/* SQEs ready to be consumed */
	unsigned int		sc_sq_depth;	/* Depth of SQ */
	__be32			sc_fc_credits;	/* Forward credits */
	u32			sc_max_requests;	/* Max requests */
	u32			sc_max_bc_requests;	/* Backward credits */
	int			sc_max_req_size;	/* Size of each RQ WR buf */
	u8			sc_port_num;

	struct ib_pd		*sc_pd;

	spinlock_t		sc_send_lock;
	struct llist_head	sc_send_ctxts;
	spinlock_t		sc_rw_ctxt_lock;
	struct llist_head	sc_rw_ctxts;

	u32			sc_pending_recvs;
	u32			sc_recv_batch;
	struct list_head	sc_rq_dto_q;
	struct list_head	sc_read_complete_q;
	spinlock_t		sc_rq_dto_lock;
	struct ib_qp		*sc_qp;
	struct ib_cq		*sc_rq_cq;
	struct ib_cq		*sc_sq_cq;

	spinlock_t		sc_lock;	/* transport lock */

	wait_queue_head_t	sc_send_wait;	/* SQ exhaustion waitlist */
	unsigned long		sc_flags;
	struct work_struct	sc_work;

	struct llist_head	sc_recv_ctxts;

	atomic_t		sc_completion_ids;
};

/* sc_flags */
#define RDMAXPRT_CONN_PENDING	3

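/*
 * sc_flags is a bit mask manipulated with the atomic bitops. A sketch
 * of typical usage (illustrative only, not lifted from the transport
 * code): a connection path might claim the pending state like this,
 * so that only one caller initiates the RDMA connection:
 *
 *	if (!test_and_set_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
 *		... this caller proceeds with connection setup ...
 */
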
static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	return container_of(xprt, struct svcxprt_rdma, sc_xprt);
}

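/*
 * Example (illustrative only): a handler holding an RPC request that is
 * bound to an RDMA transport can recover that transport from the
 * generic svc_xprt embedded in it:
 *
 *	struct svcxprt_rdma *rdma = svc_rdma_rqst_rdma(rqstp);
 *
 * This is valid only when rqstp->rq_xprt is known to belong to
 * svc_rdma_class.
 */
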
/*
 * Default connection parameters
 */
enum {
	RPCRDMA_LISTEN_BACKLOG	= 10,
	RPCRDMA_MAX_REQUESTS	= 64,
	RPCRDMA_MAX_BC_REQUESTS	= 2,
};

#define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD

/**
 * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
 * @rdma: controlling transport
 * @cid: completion ID to initialize
 */
static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
					  struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

/**
 * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
 * @rdma: controlling transport
 * @cid: completion ID to initialize
 */
static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
					  struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

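/*
 * Illustrative sketch: a completion ID pairs a CQ's resource ID with a
 * per-transport counter so that completions can be correlated in
 * tracepoints. A send-side context constructor might stamp its ID like
 * this (assuming ctxt is a freshly allocated svc_rdma_send_ctxt):
 *
 *	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
 *
 * A receive-side constructor would call svc_rdma_recv_cid_init()
 * against &ctxt->rc_cid in the same way.
 */
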
/*
 * A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 */
struct svc_rdma_chunk_ctxt {
	struct rpc_rdma_cid	cc_cid;
	struct ib_cqe		cc_cqe;
	struct list_head	cc_rwctxts;
	ktime_t			cc_posttime;
	int			cc_sqecount;
};

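/*
 * Illustrative sketch of the chunk context lifecycle (an informal
 * summary, not a normative contract): svc_rdma_cc_init() stamps cc_cid
 * and empties cc_rwctxts; as rdma_rw contexts are built for each
 * segment of the chunk, they are linked onto cc_rwctxts and
 * cc_sqecount accumulates the number of Send Queue entries they will
 * consume; svc_rdma_cc_release() tears the whole set down once the
 * I/O completes:
 *
 *	svc_rdma_cc_init(rdma, cc);
 *	... queue one rdma_rw context per segment onto &cc->cc_rwctxts ...
 *	... after completion or on error ...
 *	svc_rdma_cc_release(rdma, cc, DMA_FROM_DEVICE);
 */
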
struct svc_rdma_recv_ctxt {
	struct llist_node	rc_node;
	struct list_head	rc_list;
	struct ib_recv_wr	rc_recv_wr;
	struct ib_cqe		rc_cqe;
	struct rpc_rdma_cid	rc_cid;
	struct ib_sge		rc_recv_sge;
	void			*rc_recv_buf;
	struct xdr_stream	rc_stream;
	u32			rc_byte_len;
	u32			rc_inv_rkey;
	__be32			rc_msgtype;

	/* State for pulling a Read chunk */
	unsigned int		rc_pageoff;
	unsigned int		rc_curpage;
	unsigned int		rc_readbytes;
	struct xdr_buf		rc_saved_arg;
	struct svc_rdma_chunk_ctxt	rc_cc;

	struct svc_rdma_pcl	rc_call_pcl;

	struct svc_rdma_pcl	rc_read_pcl;
	struct svc_rdma_chunk	*rc_cur_result_payload;
	struct svc_rdma_pcl	rc_write_pcl;
	struct svc_rdma_pcl	rc_reply_pcl;

	unsigned int		rc_page_count;
	struct page		*rc_pages[RPCSVC_MAXPAGES];
};

/*
 * State for sending a Write chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	struct svcxprt_rdma	*wi_rdma;

	const struct svc_rdma_chunk	*wi_chunk;

	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;

	/* SGL constructor arguments */
	const struct xdr_buf	*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
	struct work_struct	wi_work;
};

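/*
 * Illustrative sketch of how the write state advances (an assumption
 * about the logic in svc_rdma_rw.c, not a guarantee): after an SGL is
 * constructed covering "n" bytes of the current segment of wi_chunk,
 * wi_seg_off advances by n; once the segment is fully consumed,
 * wi_seg_no moves to the next segment and wi_seg_off resets
 * (segment_length here is a placeholder for the current segment's
 * length):
 *
 *	if (wi_seg_off + n == segment_length) {
 *		wi_seg_no++;
 *		wi_seg_off = 0;
 *	} else {
 *		wi_seg_off += n;
 *	}
 */
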
struct svc_rdma_send_ctxt {
	struct llist_node	sc_node;
	struct rpc_rdma_cid	sc_cid;
	struct work_struct	sc_work;

	struct svcxprt_rdma	*sc_rdma;
	struct ib_send_wr	sc_send_wr;
	struct ib_send_wr	*sc_wr_chain;
	int			sc_sqecount;
	struct ib_cqe		sc_cqe;
	struct xdr_buf		sc_hdrbuf;
	struct xdr_stream	sc_stream;
	struct svc_rdma_write_info sc_reply_info;
	void			*sc_xprt_buf;
	int			sc_page_count;
	int			sc_cur_sge_no;
	struct page		*sc_pages[RPCSVC_MAXPAGES];
	struct ib_sge		sc_sges[];
};

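/*
 * Illustrative sketch of the send-side flow (an informal summary under
 * the assumption that these helpers return NULL or a negative errno on
 * failure; not a contract): a reply path obtains a context, encodes
 * the RPC/RDMA transport header into sc_hdrbuf via sc_stream, maps the
 * RPC reply into sc_sges, and posts the Send; the context is returned
 * to the transport's free list when no longer needed:
 *
 *	ctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!ctxt)
 *		goto drop;
 *	... encode the transport header into ctxt->sc_hdrbuf ...
 *	if (svc_rdma_map_reply_msg(rdma, ctxt, write_pcl, reply_pcl, xdr) < 0)
 *		goto put_ctxt;
 *	if (svc_rdma_post_send(rdma, ctxt) < 0)
 *		goto put_ctxt;
 */
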
/* svc_rdma_backchannel.c */
extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
				     struct svc_rdma_recv_ctxt *rctxt);

/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
extern struct svc_rdma_recv_ctxt *
		svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
				   struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
extern int svc_rdma_recvfrom(struct svc_rqst *);

/* svc_rdma_rw.c */
extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc);
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
				struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir);
extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
					 struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr);
extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
					const struct svc_rdma_pcl *write_pcl,
					const struct svc_rdma_pcl *reply_pcl,
					struct svc_rdma_send_ctxt *sctxt,
					const struct xdr_buf *xdr);
extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
				      struct svc_rqst *rqstp,
				      struct svc_rdma_recv_ctxt *head);

/* svc_rdma_sendto.c */
extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
extern struct svc_rdma_send_ctxt *
		svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_post_send(struct svcxprt_rdma *rdma,
			      struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
				  struct svc_rdma_send_ctxt *sctxt,
				  const struct svc_rdma_pcl *write_pcl,
				  const struct svc_rdma_pcl *reply_pcl,
				  const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    struct svc_rdma_recv_ctxt *rctxt,
				    int status);
extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
				   unsigned int length);

/* svc_rdma_transport.c */
extern struct svc_xprt_class svc_rdma_class;
#ifdef CONFIG_SUNRPC_BACKCHANNEL
extern struct svc_xprt_class svc_rdma_bc_class;
#endif

/* svc_rdma.c */
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);

#endif