/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *                                                                 *
 * Contact Information:                                            *
 * linux-drivers@emulex.com                                        *
 * Costa Mesa, CA 92626                                            *
 *******************************************************************/
#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"
#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
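
/*
 * Illustrative sketch (not part of the original header): OCRDMA_UVERBS()
 * turns a user-verbs command name into its bit in the ib_device
 * uverbs_cmd_mask, so the supported commands would typically be
 * advertised at device registration along these lines:
 *
 *	dev->ibdev.uverbs_cmd_mask =
 *		OCRDMA_UVERBS(GET_CONTEXT) |
 *		OCRDMA_UVERBS(QUERY_DEVICE) |
 *		OCRDMA_UVERBS(QUERY_PORT) |
 *		OCRDMA_UVERBS(CREATE_CQ) |
 *		OCRDMA_UVERBS(CREATE_QP);
 */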
struct ocrdma_dev_attr {
	int max_pages_per_frmr;
	u8 cq_overflow_detect;
	u8 local_ca_ack_delay;
};
struct ocrdma_queue_info {
	u16 entry_size;		/* size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell */
};
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	struct ocrdma_dev *dev;
};
struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
};

struct mqe_ctx {
	struct mutex lock;	/* serializes mailbox commands on the MQ */
	wait_queue_head_t cmd_wait;
};
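
/*
 * Usage sketch (illustrative): mqe_ctx serializes mailbox traffic. A
 * submitter takes the mutex, posts the MQE, and sleeps on cmd_wait until
 * the MQ completion handler wakes it. ocrdma_post_mqe() and the cmd_done
 * flag are assumed names, not definitions from this header:
 *
 *	mutex_lock(&dev->mqe_ctx.lock);
 *	ocrdma_post_mqe(dev, mqe);			// assumed helper
 *	wait_event(dev->mqe_ctx.cmd_wait, dev->mqe_ctx.cmd_done);
 *	mutex_unlock(&dev->mqe_ctx.lock);
 */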
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock;	/* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;

	struct ocrdma_eq meq;
	struct ocrdma_eq *qp_eq_tbl;

	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		/* provides synchronization for av entry allocations */
		spinlock_t lock;
		struct ocrdma_pbl pbl;
	} av_tbl;

	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
};
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_dev *dev;
	struct ocrdma_cqe *va;
	u32 phase;
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 */
	bool armed, solicited;

	spinlock_t cq_lock ____cacheline_aligned;	/* provides synchronization
							 * to cq polling
							 */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;

	struct ocrdma_ucontext *ucontext;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
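
/*
 * Flush-list sketch (illustrative): when a QP enters the error state,
 * its per-CQ list nodes (sq_entry/rq_entry, defined in struct ocrdma_qp
 * below) would be linked onto these heads under flush_q_lock so the CQ
 * can later synthesize flush completions:
 *
 *	spin_lock_irqsave(&dev->flush_q_lock, flags);
 *	list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
 *	list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
 *	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 */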
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
};
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_dev *dev;
	struct ocrdma_av *av;
};
struct ocrdma_qp_hwq_info {
	u8 *va;		/* virtual address */
	u16 dbid;	/* qid, where to ring the doorbell */
};
struct ocrdma_srq {
	struct ib_srq ibsrq;
	struct ocrdma_dev *dev;
	struct ocrdma_qp_hwq_info rq;
	/* provides synchronization to multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_pd *pd;
};
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	struct ocrdma_qp_hwq_info sq;
	uint16_t dpp_wqe_idx;

	/* provides synchronization to multiple contexts posting wqes and rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	struct ocrdma_qp_hwq_info rq;
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	u32 max_ord, max_ird;

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;
};
struct ocrdma_hw_mr {
	struct ocrdma_dev *dev;
	struct ocrdma_pbl *pbl_table;
};
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
	struct ocrdma_pd *pd;
};
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct ocrdma_dev *dev;

	struct list_head mm_head;
	struct mutex mm_list_lock;	/* protects list entries of mm type */
};
struct ocrdma_mm {
	struct list_head entry;
};
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}
static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}
static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
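
/*
 * The accessors above are all plain container_of() down-casts: each
 * ocrdma_* object embeds its ib_* counterpart, so a pointer handed to
 * the IB core maps back to the driver object without any table lookup.
 * Illustrative use (hypothetical helper, not part of this header):
 *
 *	static struct ocrdma_dev *ocrdma_cq_to_dev(struct ib_cq *ibcq)
 *	{
 *		return get_ocrdma_cq(ibcq)->dev;
 *	}
 */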
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
		 qp->id < 64) ? 24 : 16);
}
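
/*
 * Doorbell sketch (illustrative; register layout assumed from the shift
 * logic above): the count of newly posted entries sits above the queue
 * id in the doorbell word, with GEN2 QPs below id 64 using the wider
 * 24-bit position:
 *
 *	u32 val = qp->sq.dbid |
 *		  (num_posted << ocrdma_get_num_posted_shift(qp));
 *	iowrite32(val, db);	// db: mapped doorbell register (assumed)
 */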
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;

	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase) ? 1 : 0;
}
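
/*
 * Polling sketch (illustrative): OCRDMA_CQE_VALID behaves as a phase bit
 * rather than a sticky flag. Hardware stamps each CQE with the current
 * phase and the driver inverts cq->phase on every wrap of the ring, so
 * entries left over from the previous pass compare unequal and polling
 * stops without having to zero consumed CQEs:
 *
 *	while (is_cqe_valid(cq, &cq->va[cq->getp])) {
 *		// consume the CQE, advance cq->getp,
 *		// and flip cq->phase on wrap-around
 *	}
 */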
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}
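
/*
 * Classification sketch (illustrative): a completion handler would first
 * split CQEs by queue type and then consult the immediate-data flags;
 * ocrdma_poll_scqe()/ocrdma_poll_rcqe() are assumed helper names:
 *
 *	if (is_cqe_for_sq(cqe)) {
 *		ocrdma_poll_scqe(qp, cqe);
 *	} else {
 *		ocrdma_poll_rcqe(qp, cqe);
 *		if (is_cqe_imm(cqe))
 *			wc->wc_flags |= IB_WC_WITH_IMM;	// assumed ib_wc use
 *	}
 */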
static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

#endif /* __OCRDMA_H__ */