/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_hw_counters.h"
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}
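/* Illustrative note (not part of the original header): pkey_match() treats
 * bits 0-14 as the partition key value and bit 15 as the full-membership
 * bit, so two keys match only when their values agree and at least one side
 * is a full member. A few sample evaluations under that reading:
 *
 *	pkey_match(0xffff, 0xffff) == 1		full member vs. full member
 *	pkey_match(0x7fff, 0xffff) == 1		limited member vs. full member
 *	pkey_match(0x7fff, 0x7fff) == 0		two limited members never match
 *	pkey_match(0x0000, 0x8000) == 0		a key value of zero is invalid
 */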
/* Return >0 if psn_a > psn_b
 * Return 0 if psn_a == psn_b
 * Return <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
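/* Illustrative note (not part of the original header): PSNs are 24-bit
 * sequence numbers, so shifting the unsigned difference left by 8 places it
 * in the top 24 bits of a 32-bit word; reading the result as signed makes
 * the comparison robust to wrap-around, e.g.:
 *
 *	psn_compare(0x000001, 0xffffff) > 0	1 follows 0xffffff after wrap
 *	psn_compare(0xffffff, 0x000001) < 0
 */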
	struct rxe_pool_entry	pelem;
	struct ib_ucontext	ibuc;

	struct rxe_pool_entry	pelem;

	struct rxe_pool_entry	pelem;

	struct ib_uverbs_wc	uibwc;

	struct rxe_pool_entry	pelem;
	struct rxe_queue	*queue;
	struct tasklet_struct	comp_task;

	wqe_state_processing,

	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;

	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;

	struct rxe_pool_entry	pelem;
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */

extern char *rxe_qp_state_name[];
struct rxe_req_info {
	enum rxe_qp_state	state;
	struct rxe_task		task;

struct rxe_comp_info {
	struct rxe_task		task;

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_replay,

	enum rdatm_res_state	state;
struct rxe_resp_info {
	enum rxe_qp_state	state;
	enum ib_wc_status	status;

	struct rxe_recv_wqe	*wqe;

	/* RDMA read / atomic only */

	struct rxe_recv_wqe	wqe;
	struct ib_sge		sge[RXE_MAX_SGE];
	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;
	struct rxe_task		task;
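	/* Illustrative sketch (not from this header) of how such a ring is
	 * typically advanced, assuming a helper rxe_max_res(qp) that returns
	 * the number of allocated resources:
	 *
	 *	struct resp_res *r = &qp->resp.resources[qp->resp.res_head];
	 *	if (++qp->resp.res_head == rxe_max_res(qp))
	 *		qp->resp.res_head = 0;
	 */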
	struct rxe_pool_entry	pelem;
	struct ib_qp_attr	attr;

	enum ib_sig_type	sq_sig_type;

	struct rxe_av		pri_av;
	struct rxe_av		alt_av;

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head	grp_list;
	spinlock_t		grp_lock; /* guard grp_list */

	struct sk_buff_head	req_pkts;
	struct sk_buff_head	resp_pkts;
	struct sk_buff_head	send_pkts;

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;
	/* Timer for retransmitting a packet when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list	retrans_timer;
	u64			qp_timeout_jiffies;
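	/* Illustrative note (not from this header): qp_timeout_jiffies is the
	 * jiffies form of the IB local ACK timeout, which encodes a delay of
	 * 4.096 us * 2^timeout. A sketch of that conversion (the driver's
	 * actual code lives in the .c files and may differ in detail):
	 *
	 *	qp->qp_timeout_jiffies = nsecs_to_jiffies(4096ULL << attr->timeout);
	 */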
	/* Timer for handling RNR NAKS. */
	struct timer_list	rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;

	RXE_MEM_STATE_ZOMBIE,
	RXE_MEM_STATE_INVALID,
#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {

	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
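/* Illustrative arithmetic (not from this header): assuming rxe_phys_buf is
 * the usual pair of u64 addr and u64 size fields (16 bytes) and 4 KiB pages,
 * RXE_BUF_PER_MAP works out to 4096 / 16 = 256 physical buffers per map.
 */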
	struct rxe_pool_entry	pelem;
	struct ib_umem		*umem;
	enum rxe_mem_state	state;
	enum rxe_mem_type	type;
	struct rxe_map		**map;

	struct rxe_pool_entry	pelem;
	spinlock_t		mcg_lock; /* guard group */
	struct list_head	qp_list;

	struct rxe_pool_entry	pelem;
	struct list_head	qp_list;
	struct list_head	grp_list;
	struct rxe_mc_grp	*grp;

	struct ib_port_attr	attr;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	struct mutex		usdev_lock;

	struct net_device	*ndev;

	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;
	struct rxe_pool		mc_elem_pool;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */

	u64			stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port		port;
	struct list_head	list;
	struct crypto_shash	*tfm;
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters cnt)
{
	rxe->stats_counters[cnt]++;
}
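/* Usage sketch (illustrative, not from this header); RXE_CNT_SENT_PKTS is
 * assumed to be one of the enum rxe_counters values declared in
 * rxe_hw_counters.h:
 *
 *	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 */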
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
}

static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
}
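/* Illustrative note (not from this header): each to_r*() helper recovers the
 * rxe wrapper that embeds the corresponding ib_* object handed in by the RDMA
 * core, and passes NULL through unchanged, e.g.:
 *
 *	struct rxe_qp *qp = to_rqp(ibqp);	NULL in, NULL out
 */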
int rxe_register_device(struct rxe_dev *rxe);
int rxe_unregister_device(struct rxe_dev *rxe);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);
#endif /* RXE_VERBS_H */