/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 *******************************************************************/
#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"
#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
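/*
 * OCRDMA_UVERBS() turns an IB_USER_VERBS_CMD_* enum value into its bit in
 * the uverbs command bitmask via token pasting.  A hedged sketch of the
 * usual registration-time usage (the real mask is built in the driver's
 * main file and covers many more commands):
 *
 *	dev->ibdev.uverbs_cmd_mask = OCRDMA_UVERBS(GET_CONTEXT) |
 *				     OCRDMA_UVERBS(ALLOC_PD) |
 *				     OCRDMA_UVERBS(CREATE_CQ);
 */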
struct ocrdma_dev_attr {
	/* ... fields elided ... */
	int max_pages_per_frmr;
	/* ... */
	u8 cq_overflow_detect;
	/* ... */
	u8 local_ca_ack_delay;
	/* ... */
};
struct ocrdma_queue_info {
	/* ... */
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;		/* qid, where to ring the doorbell. */
	/* ... */
};
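/*
 * A note added for clarity: these queue_info structures describe rings
 * shared with the adapter.  Software posts entries of entry_size bytes
 * and writes the queue's id into the doorbell register to tell the
 * firmware how much was posted; the wrap/phase encoding details live in
 * ocrdma_sli.h.
 */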
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	/* ... */
	struct ocrdma_dev *dev;
	/* ... */
};

struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	/* ... */
};

struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;
	/* ... */
};
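/*
 * Hedged sketch of the mailbox flow these two fields support (the helper
 * names post_mqe() and mqe_is_done() are illustrative, not the driver's
 * actual symbols): the issuer takes mqe_ctx.lock so only one MQE is
 * outstanding, posts the command, and sleeps on cmd_wait until the MQ
 * completion handler wakes it:
 *
 *	mutex_lock(&dev->mqe_ctx.lock);
 *	post_mqe(dev, mqe);				// ring the MQ doorbell
 *	wait_event(dev->mqe_ctx.cmd_wait, mqe_is_done(dev));
 *	mutex_unlock(&dev->mqe_ctx.lock);
 */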
struct ocrdma_hw_mr {
	/* ... */
	struct ocrdma_pbl *pbl_table;
	/* ... */
};

struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ocrdma_hw_mr hwmr;
	/* ... */
};
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;

	struct mutex dev_lock; /* provides synchronized access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;
	struct ocrdma_qp **qp_tbl;
	/* ... */
	struct ocrdma_eq *eq_tbl;
	/* ... */
	union ib_gid *sgid_tbl;
	/* provides synchronization to the sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;
	/* ... */
	struct ocrdma_cq *gsi_sqcq;
	struct ocrdma_cq *gsi_rqcq;

	struct {
		struct ocrdma_av *va;
		/* ... */
		/* provides synchronization for av entry allocations */
		spinlock_t lock;
		/* ... */
		struct ocrdma_pbl pbl;
	} av_tbl;
	/* ... */
	struct mqe_ctx mqe_ctx;

	struct be_dev_info nic_info;

	struct list_head entry;
	/* ... */
	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
	/* ... */
};
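/*
 * A note on the lookup tables above: cq_tbl and qp_tbl are indexed by the
 * hardware-assigned cq/qp id, so completion-path code can map an id taken
 * out of a CQE or EQE straight back to the software object, e.g.
 * (illustrative):
 *
 *	struct ocrdma_cq *cq = dev->cq_tbl[cq_id];
 */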
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;
	/* ... */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 */
	/* ... */
	bool armed, solicited;
	/* ... */
	spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
						   * to cq polling
						   */
	/* synchronizes the cq completion handler invoked from multiple contexts */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	/* ... */
	struct ocrdma_ucontext *ucontext;
	/* ... */
	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
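/*
 * Hedged sketch of how sq_head/rq_head are used: when a QP enters the
 * error state, its sq_entry/rq_entry nodes are linked onto the lists of
 * the CQs it feeds, under dev->flush_q_lock, so that poll_cq can later
 * synthesize flush-status completions for the pending WRs (the fields
 * are declared in this header; the flow itself is a sketch):
 *
 *	spin_lock_irqsave(&dev->flush_q_lock, flags);
 *	list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
 *	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 */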
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;
	/* ... */
};

struct ocrdma_ah {
	struct ib_ah ibah;
	/* ... */
	struct ocrdma_av *av;
	/* ... */
};
struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	/* ... */
	u16 dbid;		/* qid, where to ring the doorbell. */
	/* ... */
};
struct ocrdma_srq {
	struct ib_srq ibsrq;
	/* ... */
	struct ocrdma_qp_hwq_info rq;
	/* ... */

	/* provides synchronization to multiple contexts posting rqes */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	/* ... */
};
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;
	/* ... */
	struct ocrdma_qp_hwq_info sq;
	struct {
		/* ... */
		uint16_t dpp_wqe_idx;
		/* ... */
	} *wqe_wr_id_tbl;
	/* ... */

	/* provides synchronization to multiple contexts posting wqes and rqes */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;
	/* ... */
	struct ocrdma_qp_hwq_info rq;
	/* ... */
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	/* ... */
	u32 max_ord, max_ird;
	/* ... */
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;
	/* ... */
};
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;
	/* ... */
	struct list_head mm_head;
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;
	/* ... */
};

struct ocrdma_mm {
	/* ... */
	struct list_head entry;
};
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
							  *ibucontext)
{
	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
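/*
 * These helpers recover the driver-private wrapper from the embedded
 * ib_* core structure via container_of().  A hedged example of the usual
 * call pattern inside a verbs entry point (the surrounding function is
 * illustrative, not a declaration from this driver):
 *
 *	static int example_post_recv(struct ib_qp *ibqp, ...)
 *	{
 *		struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
 *		...
 *	}
 */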
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
		 qp->id < 128) ? 24 : 16);
}
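/*
 * The shift above selects where the "number of entries posted" field
 * lives in the queue doorbell word: bit 24 for GEN2 adapters with low
 * qids, bit 16 otherwise.  Hedged sketch of ringing a doorbell with it
 * (num_posted and db_addr are illustrative locals, not driver symbols):
 *
 *	u32 val = qp->sq.dbid | (num_posted << ocrdma_get_num_posted_shift(qp));
 *	iowrite32(val, db_addr);
 */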
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
	int cqe_valid;

	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
	return (cqe_valid == cq->phase);
}
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
	return (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}
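/*
 * Hedged sketch of the phase-bit polling these predicates support: the
 * CQE ring is not zeroed after use, so ownership is tracked by comparing
 * each CQE's valid bit against the CQ's current phase, which flips every
 * time getp wraps.  Loop below is illustrative; locking is elided and
 * poll_scqe()/poll_rcqe() are stand-in names (the ring size field is
 * among the elided CQ members above):
 *
 *	cqe = &cq->va[cq->getp];
 *	while (is_cqe_valid(cq, cqe)) {
 *		if (is_cqe_for_sq(cqe))
 *			poll_scqe(qp, cqe);	// send completion
 *		else
 *			poll_rcqe(qp, cqe);	// receive completion
 *		cq->getp = (cq->getp + 1) % ring_size; // phase flips on wrap
 *		cqe = &cq->va[cq->getp];
 *	}
 */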
static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
				      struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
	struct in6_addr in6;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6))
		rdma_get_mcast_mac(&in6, mac_addr);
	else
		memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
	return 0;
}

#endif /* __OCRDMA_H__ */