/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

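/* IB partition keys are 16 bits: the low 15 bits hold the key value and
 * the top bit marks full (1) vs. limited (0) membership. Two pkeys match
 * when the key value is non-zero and equal and at least one side is a
 * full member, e.g. pkey_match(0xffff, 0x7fff) == 1, but
 * pkey_match(0x7fff, 0x7fff) == 0 (two limited members never match).
 */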
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

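/* PSNs are 24 bits wide, so psn_compare() below must respect wraparound
 * at 2^24: shifting the 32-bit difference left by 8 moves bit 23 into
 * the sign bit of the s32 result. For example, psn_compare(1, 0xffffff)
 * returns > 0, since PSN 1 follows PSN 0xffffff after a wrap.
 */
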
/* Return >0 if psn_a > psn_b
 *	  0 if psn_a == psn_b
 *	 <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}

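/* Each rxe object below embeds both the generic verbs structure (ib_*)
 * and a rxe_pool_entry, so an object can be reached either by
 * container_of() from the verbs pointer handed in by the RDMA core or
 * through the corresponding rxe object pool.
 */
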
struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_entry pelem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_entry pelem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_entry pelem;
	struct rxe_pd *pd;
	struct rxe_av av;
};

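/* A completion is built once and copied into the CQ's shared queue; the
 * union overlays the in-kernel ib_wc with the ABI-stable ib_uverbs_wc
 * that user-space consumers read from the mmapped queue.
 */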
struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_entry pelem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_dying;
	int is_user;
	struct tasklet_struct comp_task;
};

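/* Lifecycle of a send WQE as it moves from being posted, through the
 * requester (processing/pending), to completion or error.
 */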
enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_entry pelem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};

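/* Requester-side state: the next WQE to process, the next PSN to send,
 * and the flags that make the requester wait on fences, outstanding
 * read/atomic slots, or unacknowledged PSNs.
 */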
struct rxe_req_info {
	enum rxe_qp_state state;
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

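/* A responder resource caches what is needed to rebuild the response to
 * one RDMA read or atomic operation, so a duplicate request can be
 * answered again without executing it anew.
 */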
struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			struct sk_buff *skb;
		} atomic;
		struct {
			struct rxe_mem *mr;
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state state;
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	struct rxe_mem *mr;
	u32 resid;
	u32 rkey;
	u32 length;
	u64 atomic_orig;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};

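/* The QP ties everything together: the verbs-level attributes, the send
 * and receive queues, the UDP socket used as the wire transport, and the
 * three task-driven state machines (requester, completer, responder)
 * that do the actual protocol work.
 */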
struct rxe_qp {
	struct rxe_pool_entry pelem;
	struct ib_qp ibqp;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	int is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head grp_list;
	spinlock_t grp_lock; /* guard grp_list */

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;
	struct sk_buff_head send_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already started.
	 * The responder resets it whenever an ack is received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

enum rxe_mem_state {
	RXE_MEM_STATE_ZOMBIE,
	RXE_MEM_STATE_INVALID,
	RXE_MEM_STATE_FREE,
	RXE_MEM_STATE_VALID,
};

enum rxe_mem_type {
	RXE_MEM_TYPE_NONE,
	RXE_MEM_TYPE_DMA,
	RXE_MEM_TYPE_MR,
	RXE_MEM_TYPE_MW,
};

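/* Memory regions are described by a two-level table: rxe_mem.map points
 * at num_map page-sized rxe_map blocks, each holding RXE_BUF_PER_MAP
 * physical buffer descriptors. Roughly, an io address is converted to a
 * buffer number via page_shift/page_mask, then split into a map index
 * and a buffer index via map_shift/map_mask.
 */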
#define RXE_BUF_PER_MAP	(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64 addr;
	u64 size;
};

struct rxe_map {
	struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};

struct rxe_mem {
	struct rxe_pool_entry pelem;
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};

	struct ib_umem *umem;

	enum rxe_mem_state state;
	enum rxe_mem_type type;
	u64 va;
	u64 iova;
	size_t length;
	u32 offset;
	int access;

	int page_shift;
	int page_mask;
	int map_shift;
	int map_mask;

	u32 num_buf;
	u32 nbuf;

	u32 max_buf;
	u32 num_map;

	struct rxe_map **map;
};

struct rxe_mc_grp {
	struct rxe_pool_entry pelem;
	spinlock_t mcg_lock; /* guard group */
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	int num_qp;
	u32 qkey;
	u16 pkey;
};

struct rxe_mc_elem {
	struct rxe_pool_entry pelem;
	struct list_head qp_list;
	struct list_head grp_list;
	struct rxe_qp *qp;
	struct rxe_mc_grp *grp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_smi_index;
	u32 qp_gsi_index;
};

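/* Per-device state. Each rxe device is layered on one net_device and
 * keeps a pool per object type plus the port state and counters.
 */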
struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	int xmit_errors;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;
	struct rxe_pool mc_grp_pool;
	struct rxe_pool mc_elem_pool;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

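/* NULL-safe downcasts from the RDMA core's verbs structures to the rxe
 * containers that embed them, e.g.:
 *
 *	struct rxe_dev *rxe = to_rdev(ibqp->device);
 *	struct rxe_qp *qp = to_rqp(ibqp);
 */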
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
}

static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
}

static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline u32 mr_lkey(struct rxe_mem *mr)
{
	return mr->ibmr.lkey;
}

static inline u32 mr_rkey(struct rxe_mem *mr)
{
	return mr->ibmr.rkey;
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

#endif /* RXE_VERBS_H */