/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
enum mbx_status {
        OCRDMA_MBX_STATUS_FAILED = 1,
        OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
        OCRDMA_MBX_STATUS_OOR = 100,
        OCRDMA_MBX_STATUS_INVALID_PD = 101,
        OCRDMA_MBX_STATUS_PD_INUSE = 102,
        OCRDMA_MBX_STATUS_INVALID_CQ = 103,
        OCRDMA_MBX_STATUS_INVALID_QP = 104,
        OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
        OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
        OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
        OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
        OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
        OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
        OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
        OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
        OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
        OCRDMA_MBX_STATUS_MW_BOUND = 114,
        OCRDMA_MBX_STATUS_INVALID_VA = 115,
        OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
        OCRDMA_MBX_STATUS_INVALID_FBO = 117,
        OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
        OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
        OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
        OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
        OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
        OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
        OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
        OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
        OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
        OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
        OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
        OCRDMA_MBX_STATUS_QP_BOUND = 130,
        OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
        OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
        OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
        OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
        OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
        OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
};

enum additional_status {
        OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
        OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
        OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
        OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
        OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
        OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
};
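
/* Helpers that walk the EQ, MCQ and MQ rings kept in ocrdma_dev. Each ring
 * has a power-of-two length, so advancing the head/tail is a simple mask
 * with (ring length - 1).
 */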
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
        return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
        eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}

static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
        struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
            (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

        if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
                return NULL;
        return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
        dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
        return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
        dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
        return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
        switch (qps) {
        case OCRDMA_QPS_INIT:
                return IB_QPS_INIT;
        case OCRDMA_QPS_SQ_DRAINING:
                return IB_QPS_SQD;
        /* the remaining OCRDMA_QPS_* states map to the corresponding
         * IB_QPS_* values; unrecognized states are reported as error.
         */
        default:
                return IB_QPS_ERR;
        }
}

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
        switch (qps) {
        case IB_QPS_RESET:
                return OCRDMA_QPS_RST;
        case IB_QPS_INIT:
                return OCRDMA_QPS_INIT;
        case IB_QPS_RTR:
                return OCRDMA_QPS_RTR;
        case IB_QPS_RTS:
                return OCRDMA_QPS_RTS;
        case IB_QPS_SQD:
                return OCRDMA_QPS_SQD;
        case IB_QPS_SQE:
                return OCRDMA_QPS_SQE;
        case IB_QPS_ERR:
                return OCRDMA_QPS_ERR;
        default:
                return OCRDMA_QPS_ERR;
        }
}
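
/* Translate a mailbox response status (and its additional status) into a
 * negative errno understood by the rest of the stack.
 */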
static int ocrdma_get_mbx_errno(u32 status)
{
        int err_num;
        u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
                                        OCRDMA_MBX_RSP_STATUS_SHIFT;
        u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
                                        OCRDMA_MBX_RSP_ASTATUS_SHIFT;

        switch (mbox_status) {
        case OCRDMA_MBX_STATUS_OOR:
        case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
                err_num = -EAGAIN;
                break;

        case OCRDMA_MBX_STATUS_INVALID_PD:
        case OCRDMA_MBX_STATUS_INVALID_CQ:
        case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
        case OCRDMA_MBX_STATUS_INVALID_QP:
        case OCRDMA_MBX_STATUS_INVALID_CHANGE:
        case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
        case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
        case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
        case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
        case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
        case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
        case OCRDMA_MBX_STATUS_INVALID_LKEY:
        case OCRDMA_MBX_STATUS_INVALID_VA:
        case OCRDMA_MBX_STATUS_INVALID_LENGTH:
        case OCRDMA_MBX_STATUS_INVALID_FBO:
        case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
        case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
        case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
        case OCRDMA_MBX_STATUS_SRQ_ERROR:
        case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
                err_num = -EINVAL;
                break;

        case OCRDMA_MBX_STATUS_PD_INUSE:
        case OCRDMA_MBX_STATUS_QP_BOUND:
        case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
        case OCRDMA_MBX_STATUS_MW_BOUND:
                err_num = -EBUSY;
                break;

        case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
        case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
        case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
        case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
        case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
        case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
                err_num = -ENOBUFS;
                break;

        case OCRDMA_MBX_STATUS_FAILED:
                switch (add_status) {
                case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
                        err_num = -EAGAIN;
                        break;
                default:
                        err_num = -EFAULT;
                }
                break;
        default:
                err_num = -EFAULT;
        }
        return err_num;
}
char *port_speed_string(struct ocrdma_dev *dev)
{
        char *str = "";
        u16 speeds_supported;

        speeds_supported = dev->phy.fixed_speeds_supported |
                                dev->phy.auto_speeds_supported;
        if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
                str = "40Gbps ";
        else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
                str = "10Gbps ";
        else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
                str = "1Gbps ";

        return str;
}

static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
        int err_num = -EINVAL;

        switch (cqe_status) {
        case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
                err_num = -EPERM;
                break;
        case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
                err_num = -EINVAL;
                break;
        case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
        case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
        case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
        default:
                err_num = -EINVAL;
        }
        return err_num;
}
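
/* Doorbell helpers: each builds a 32-bit doorbell value (ring id, arm and
 * solicit bits, number of entries consumed) and writes it to the doorbell
 * BAR at the queue-type specific offset.
 */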
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
                       bool solicited, u16 cqe_popped)
{
        u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

        val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
                OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (armed)
                val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
        if (solicited)
                val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
        val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}

static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
        u32 val = 0;

        val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
        val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
                              bool arm, bool clear_int, u16 num_eqe)
{
        u32 val = 0;

        val |= eq_id & OCRDMA_EQ_ID_MASK;
        val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);

        if (arm)
                val |= (1 << OCRDMA_REARM_SHIFT);
        if (clear_int)
                val |= (1 << OCRDMA_EQ_CLR_SHIFT);
        val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
        val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
        iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
                            u8 opcode, u8 subsys, u32 cmd_len)
{
        cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
        cmd_hdr->timeout = 20; /* seconds */
        cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}

static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
        struct ocrdma_mqe *mqe;

        mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
        if (!mqe)
                return NULL;
        mqe->hdr.spcl_sge_cnt_emb |=
                (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
                OCRDMA_MQE_HDR_EMB_MASK;
        mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

        ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
                        mqe->hdr.pyld_len);
        return mqe;
}
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
        dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
                          struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        q->size = len * entry_size;
        q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
                                   &q->dma, GFP_KERNEL);
        if (!q->va)
                return -ENOMEM;
        memset(q->va, 0, q->size);
        return 0;
}

static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
                                 dma_addr_t host_pa, int hw_page_size)
{
        int i;

        for (i = 0; i < cnt; i++) {
                q_pa[i].lo = (u32) (host_pa & 0xffffffff);
                q_pa[i].hi = (u32) upper_32_bits(host_pa);
                host_pa += hw_page_size;
        }
}
static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
                               struct ocrdma_queue_info *q, int queue_type)
{
        u8 opcode = 0;
        int status;
        struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

        switch (queue_type) {
        case QTYPE_MCCQ:
                opcode = OCRDMA_CMD_DELETE_MQ;
                break;
        case QTYPE_CQ:
                opcode = OCRDMA_CMD_DELETE_CQ;
                break;
        case QTYPE_EQ:
                opcode = OCRDMA_CMD_DELETE_EQ;
                break;
        default:
                BUG();
        }
        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cmd->id = q->id;

        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status)
                q->created = false;
        return status;
}
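
/* Issue CREATE_EQ through the bootstrap MCC path (be_roce_mcc_cmd); the
 * response carries the EQ id and the MSI-X vector assigned by the firmware.
 */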
static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int status;
        struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
        struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
                        sizeof(*cmd));

        cmd->req.rsvd_version = 2;
        cmd->num_pages = 4;
        cmd->valid = OCRDMA_CREATE_EQ_VALID;
        cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

        ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
                             PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
                                 NULL);
        if (!status) {
                eq->q.id = rsp->vector_eqid & 0xffff;
                eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
                eq->q.created = true;
        }
        return status;
}

static int ocrdma_create_eq(struct ocrdma_dev *dev,
                            struct ocrdma_eq *eq, u16 q_len)
{
        int status;

        status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
                                sizeof(struct ocrdma_eqe));
        if (status)
                return status;

        status = ocrdma_mbx_create_eq(dev, eq);
        if (status)
                goto mbx_err;
        eq->dev = dev;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

        return 0;
mbx_err:
        ocrdma_free_q(dev, &eq->q);
        return status;
}
int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int irq;

        if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
                irq = dev->nic_info.pdev->irq;
        else
                irq = dev->nic_info.msix.vector_list[eq->vector];
        return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        if (eq->q.created) {
                ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
                ocrdma_free_q(dev, &eq->q);
        }
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
        int irq;

        /* disarm EQ so that interrupts are not generated
         * during freeing and EQ delete is in progress.
         */
        ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

        irq = ocrdma_get_irq(dev, eq);
        free_irq(irq, eq);
        _ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
        int i;

        for (i = 0; i < dev->eq_cnt; i++)
                ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
                                   struct ocrdma_queue_info *cq,
                                   struct ocrdma_queue_info *eq)
{
        struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
        struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
        int status;

        memset(cmd, 0, sizeof(*cmd));
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
        cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
                OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
        cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);

        cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
        cmd->eqn = eq->id;
        cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);

        ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
                             cq->dma, PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status) {
                cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
                cq->created = true;
        }
        return status;
}

static u32 ocrdma_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
                                struct ocrdma_queue_info *mq,
                                struct ocrdma_queue_info *cq)
{
        int num_pages, status;
        struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
        struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
        struct ocrdma_pa *pa;

        memset(cmd, 0, sizeof(*cmd));
        num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cmd->req.rsvd_version = 1;
        cmd->cqid_pages = num_pages;
        cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
        cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;

        cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
        cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
        /* Request link events on this MQ. */
        cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);

        cmd->async_cqid_ringsize = cq->id;
        cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
                                     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
        cmd->valid = OCRDMA_CREATE_MQ_VALID;
        pa = &cmd->pa[0];

        ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status) {
                mq->id = rsp->id & OCRDMA_CREATE_MQ_ID_MASK;
                mq->created = true;
        }
        return status;
}
static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
        int status;

        /* Alloc completion queue for Mailbox queue */
        status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
                                sizeof(struct ocrdma_mcqe));
        if (status)
                goto alloc_err;

        dev->eq_tbl[0].cq_cnt++;
        status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
        if (status)
                goto mbx_cq_free;

        memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
        init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
        mutex_init(&dev->mqe_ctx.lock);

        /* Alloc Mailbox queue */
        status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
                                sizeof(struct ocrdma_mqe));
        if (status)
                goto mbx_cq_destroy;
        status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
        if (status)
                goto mbx_q_free;
        ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
        return status;

mbx_q_free:
        ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
        ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
        ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
        return status;
}
static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
        struct ocrdma_queue_info *mbxq, *cq;

        /* mqe_ctx lock synchronizes with any other pending cmds. */
        mutex_lock(&dev->mqe_ctx.lock);
        mbxq = &dev->mq.sq;
        if (mbxq->created) {
                ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
                ocrdma_free_q(dev, mbxq);
        }
        mutex_unlock(&dev->mqe_ctx.lock);

        cq = &dev->mq.cq;
        if (cq->created) {
                ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
                ocrdma_free_q(dev, cq);
        }
}
static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
                                       struct ocrdma_qp *qp)
{
        enum ib_qp_state new_ib_qps = IB_QPS_ERR;
        enum ib_qp_state old_ib_qps;

        ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}
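
/* Convert a firmware async CQE into an ib_event and deliver it to the
 * consumer's QP/CQ/SRQ event handler, or to ib_dispatch_event() for
 * device-fatal events.
 */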
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                                    struct ocrdma_ae_mcqe *cqe)
{
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_cq *cq = NULL;
        struct ib_event ib_evt;
        int cq_event = 0;
        int qp_event = 1;
        int srq_event = 0;
        int dev_event = 0;
        int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
            OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
        u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
        u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;

        /*
         * Some FW versions return wrong qp or cq ids in CQEs.
         * Checking whether the IDs are valid
         */

        if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
                if (qpid < dev->attr.max_qp)
                        qp = dev->qp_tbl[qpid];
                if (qp == NULL) {
                        pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
                               dev->id, qpid);
                        return;
                }
        }

        if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
                if (cqid < dev->attr.max_cq)
                        cq = dev->cq_tbl[cqid];
                if (cq == NULL) {
                        pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
                               dev->id, cqid);
                        return;
                }
        }

        memset(&ib_evt, 0, sizeof(ib_evt));

        ib_evt.device = &dev->ibdev;

        switch (type) {
        case OCRDMA_CQ_ERROR:
                ib_evt.element.cq = &cq->ibcq;
                ib_evt.event = IB_EVENT_CQ_ERR;
                cq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_CQ_OVERRUN_ERROR:
                ib_evt.element.cq = &cq->ibcq;
                ib_evt.event = IB_EVENT_CQ_ERR;
                cq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_CQ_QPCAT_ERROR:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_FATAL;
                ocrdma_process_qpcat_error(dev, qp);
                break;
        case OCRDMA_QP_ACCESS_ERROR:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
                break;
        case OCRDMA_QP_COMM_EST_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_COMM_EST;
                break;
        case OCRDMA_SQ_DRAINED_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_SQ_DRAINED;
                break;
        case OCRDMA_DEVICE_FATAL_EVENT:
                ib_evt.element.port_num = 1;
                ib_evt.event = IB_EVENT_DEVICE_FATAL;
                qp_event = 0;
                dev_event = 1;
                break;
        case OCRDMA_SRQCAT_ERROR:
                ib_evt.element.srq = &qp->srq->ibsrq;
                ib_evt.event = IB_EVENT_SRQ_ERR;
                srq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_SRQ_LIMIT_EVENT:
                ib_evt.element.srq = &qp->srq->ibsrq;
                ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
                srq_event = 1;
                qp_event = 0;
                break;
        case OCRDMA_QP_LAST_WQE_EVENT:
                ib_evt.element.qp = &qp->ibqp;
                ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
                break;
        default:
                qp_event = 0;
                pr_err("%s() unknown type=0x%x\n", __func__, type);
                break;
        }

        if (type < OCRDMA_MAX_ASYNC_ERRORS)
                atomic_inc(&dev->async_err_stats[type]);

        if (qp_event) {
                if (qp->ibqp.event_handler)
                        qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
        } else if (cq_event) {
                if (cq->ibcq.event_handler)
                        cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
        } else if (srq_event) {
                if (qp->srq->ibsrq.event_handler)
                        qp->srq->ibsrq.event_handler(&ib_evt,
                                        qp->srq->ibsrq.srq_context);
        } else if (dev_event) {
                pr_err("%s: Fatal event received\n", dev->ibdev.name);
                ib_dispatch_event(&ib_evt);
        }
}
static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
                                     struct ocrdma_ae_mcqe *cqe)
{
        struct ocrdma_ae_pvid_mcqe *evt;
        int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
            OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

        switch (type) {
        case OCRDMA_ASYNC_EVENT_PVID_STATE:
                evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
                if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
                        OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
                        dev->pvid = ((evt->tag_enabled &
                                      OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
                                     OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
                break;

        case OCRDMA_ASYNC_EVENT_COS_VALUE:
                atomic_set(&dev->update_sl, 1);
                break;
        default:
                /* Not interested evts. */
                break;
        }
}

static void ocrdma_process_link_state(struct ocrdma_dev *dev,
                                      struct ocrdma_ae_mcqe *cqe)
{
        struct ocrdma_ae_lnkst_mcqe *evt;
        u8 lstate;

        evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
        lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);

        if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
                return;

        if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
                ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
}
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
        /* async CQE processing */
        struct ocrdma_ae_mcqe *cqe = ae_cqe;
        u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
                        OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

        switch (evt_code) {
        case OCRDMA_ASYNC_LINK_EVE_CODE:
                ocrdma_process_link_state(dev, cqe);
                break;
        case OCRDMA_ASYNC_RDMA_EVE_CODE:
                ocrdma_dispatch_ibevent(dev, cqe);
                break;
        case OCRDMA_ASYNC_GRP5_EVE_CODE:
                ocrdma_process_grp5_aync(dev, cqe);
                break;
        default:
                pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
                       dev->id, evt_code);
        }
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
        if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
                dev->mqe_ctx.cqe_status = (cqe->status &
                     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
                dev->mqe_ctx.ext_status =
                    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
                    >> OCRDMA_MCQE_ESTATUS_SHIFT;
                dev->mqe_ctx.cmd_done = true;
                wake_up(&dev->mqe_ctx.cmd_wait);
        } else
                pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
                       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}
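
/* Drain the MQ completion queue: async CQEs go to ocrdma_process_acqe(),
 * command completions wake the waiter in ocrdma_mbx_cmd().
 */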
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
        u16 cqe_popped = 0;
        struct ocrdma_mcqe *cqe;

        while (1) {
                cqe = ocrdma_get_mcqe(dev);
                if (cqe == NULL)
                        break;
                ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
                cqe_popped += 1;
                if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
                        ocrdma_process_acqe(dev, cqe);
                else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
                        ocrdma_process_mcqe(dev, cqe);
                memset(cqe, 0, sizeof(struct ocrdma_mcqe));
                ocrdma_mcq_inc_tail(dev);
        }
        ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
        return 0;
}
static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                                struct ocrdma_cq *cq, bool sq)
{
        struct ocrdma_qp *qp;
        struct list_head *cur;
        struct ocrdma_cq *bcq = NULL;
        struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);

        list_for_each(cur, head) {
                if (sq)
                        qp = list_entry(cur, struct ocrdma_qp, sq_entry);
                else
                        qp = list_entry(cur, struct ocrdma_qp, rq_entry);

                /* if wq and rq share the same cq, then comp_handler
                 * is already invoked.
                 */
                if (qp->sq_cq == qp->rq_cq)
                        continue;
                /* if completion came on sq, rq's cq is buddy cq.
                 * if completion came on rq, sq's cq is buddy cq.
                 */
                if (qp->sq_cq == cq)
                        bcq = qp->rq_cq;
                else
                        bcq = qp->sq_cq;
                return bcq;
        }
        return NULL;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                                       struct ocrdma_cq *cq)
{
        unsigned long flags;
        struct ocrdma_cq *bcq = NULL;

        /* Go through the list of QPs in error state which are using this CQ
         * and invoke its callback handler to trigger CQE processing for
         * error/flushed CQEs. It is rare to find more than a few entries in
         * this list as most consumers stop after getting an error CQE.
         * The list is traversed only once when a matching buddy cq is found
         * for a QP.
         */
        spin_lock_irqsave(&dev->flush_q_lock, flags);
        /* Check if buddy CQ is present.
         * true - Check for SQ CQ
         * false - Check for RQ CQ
         */
        bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
        if (bcq == NULL)
                bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);

        /* if there is valid buddy cq, look for its completion handler */
        if (bcq && bcq->ibcq.comp_handler) {
                spin_lock_irqsave(&bcq->comp_handler_lock, flags);
                (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
                spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
        }
}
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
        unsigned long flags;
        struct ocrdma_cq *cq;

        if (cq_idx >= OCRDMA_MAX_CQ)
                BUG();

        cq = dev->cq_tbl[cq_idx];
        if (cq == NULL)
                return;

        if (cq->ibcq.comp_handler) {
                spin_lock_irqsave(&cq->comp_handler_lock, flags);
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
                spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
        }
        ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
        /* process the MQ-CQE. */
        if (cq_id == dev->mq.cq.id)
                ocrdma_mq_cq_handler(dev, cq_id);
        else
                ocrdma_qp_cq_handler(dev, cq_id);
}
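
/* One interrupt handler per EQ: consume EQEs, re-arm the EQ doorbell and
 * dispatch the completion queues they point at.
 */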
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
        struct ocrdma_eq *eq = handle;
        struct ocrdma_dev *dev = eq->dev;
        struct ocrdma_eqe eqe;
        struct ocrdma_eqe *ptr;
        u16 cq_id;
        u16 mcode;
        int budget = eq->cq_cnt;

        do {
                ptr = ocrdma_get_eqe(eq);
                eqe = *ptr;
                ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
                mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
                                >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
                if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
                        pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
                               eq->q.id, eqe.id_valid);
                if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
                        break;

                ptr->id_valid = 0;
                /* ring eq doorbell as soon as its consumed. */
                ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
                /* check whether its CQE or not. */
                if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
                        cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
                        ocrdma_cq_handler(dev, cq_id);
                }
                ocrdma_eq_inc_tail(eq);

                /* There can be a stale EQE after the last bound CQ is
                 * destroyed. EQE valid and budget == 0 implies this.
                 */
                if (budget)
                        budget--;

        } while (budget);

        eq->aic_obj.eq_intr_cnt++;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
        return IRQ_HANDLED;
}
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
        struct ocrdma_mqe *mqe;

        dev->mqe_ctx.tag = dev->mq.sq.head;
        dev->mqe_ctx.cmd_done = false;
        mqe = ocrdma_get_mqe(dev);
        cmd->hdr.tag_lo = dev->mq.sq.head;
        ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
        /* make sure descriptor is written before ringing doorbell */
        wmb();
        ocrdma_mq_inc_head(dev);
        ocrdma_ring_mq_db(dev);
}

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
        long status;
        /* 30 sec timeout */
        status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
                                    (dev->mqe_ctx.cmd_done != false),
                                    msecs_to_jiffies(30000));
        if (status)
                return 0;

        dev->mqe_ctx.fw_error_state = true;
        pr_err("%s(%d) mailbox timeout: fw not responding\n",
               __func__, dev->id);
        return -1;
}
/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
        int status = 0;
        u16 cqe_status, ext_status;
        struct ocrdma_mqe *rsp_mqe;
        struct ocrdma_mbx_rsp *rsp = NULL;

        mutex_lock(&dev->mqe_ctx.lock);
        if (dev->mqe_ctx.fw_error_state)
                goto mbx_err;
        ocrdma_post_mqe(dev, mqe);
        status = ocrdma_wait_mqe_cmpl(dev);
        if (status)
                goto mbx_err;
        cqe_status = dev->mqe_ctx.cqe_status;
        ext_status = dev->mqe_ctx.ext_status;
        rsp_mqe = ocrdma_get_mqe_rsp(dev);
        ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
        if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
                                OCRDMA_MQE_HDR_EMB_SHIFT)
                rsp = &mqe->u.rsp;

        if (cqe_status || ext_status) {
                pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
                       __func__, cqe_status, ext_status);
                if (rsp) {
                        /* This is for embedded cmds. */
                        pr_err("opcode=0x%x, subsystem=0x%x\n",
                               (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
                                OCRDMA_MBX_RSP_OPCODE_SHIFT,
                               (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
                                OCRDMA_MBX_RSP_SUBSYS_SHIFT);
                }
                status = ocrdma_get_mbx_cqe_errno(cqe_status);
                goto mbx_err;
        }
        /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
        if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
                status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
        mutex_unlock(&dev->mqe_ctx.lock);
        return status;
}
static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
                                 void *payload_va)
{
        int status;
        struct ocrdma_mbx_rsp *rsp = payload_va;

        if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
                                OCRDMA_MQE_HDR_EMB_SHIFT)
                BUG();

        status = ocrdma_mbx_cmd(dev, mqe);
        if (!status)
                /* For non embedded, only CQE failures are handled in
                 * ocrdma_mbx_cmd. We need to check for RSP errors.
                 */
                if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
                        status = ocrdma_get_mbx_errno(rsp->status);

        if (status)
                pr_err("opcode=0x%x, subsystem=0x%x\n",
                       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
                        OCRDMA_MBX_RSP_OPCODE_SHIFT,
                       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
                        OCRDMA_MBX_RSP_SUBSYS_SHIFT);
        return status;
}
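
/* Unpack the QUERY_CONFIG mailbox response into the cached device
 * attributes.
 */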
static void ocrdma_get_attr(struct ocrdma_dev *dev,
                            struct ocrdma_dev_attr *attr,
                            struct ocrdma_mbx_query_config *rsp)
{
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
        attr->udp_encap = (rsp->max_pd_ca_ack_delay &
                           OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
                           OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
        attr->max_dpp_pds =
            (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
        attr->max_qp =
            (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
        attr->max_srq =
            (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
        attr->max_send_sge = ((rsp->max_write_send_sge &
                               OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
        attr->max_recv_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
                             OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
        attr->max_rdma_sge = (rsp->max_write_send_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
        attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
                                OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
        attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
                                    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
                                    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
        attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
                               OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
                               OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
        attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
                                    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
                                    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
        attr->max_mw = rsp->max_mw;
        attr->max_mr = rsp->max_mr;
        attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
                            rsp->max_mr_size_lo;
        attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
        attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
        attr->max_cqe = rsp->max_cq_cqes_per_cq &
                        OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
        attr->max_cq = (rsp->max_cq_cqes_per_cq &
                        OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
                        OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
        attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
                           OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
                          OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
                          OCRDMA_WQE_STRIDE;
        attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
                           OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
                          OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
                          OCRDMA_WQE_STRIDE;
        attr->max_inline_data =
            attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
                              sizeof(struct ocrdma_sge));
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
                attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
        }
        dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
                OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
        dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
                OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
                                  struct ocrdma_fw_conf_rsp *conf)
{
        u32 fn_mode;

        fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
        if (fn_mode != OCRDMA_FN_MODE_RDMA)
                return -EINVAL;
        dev->base_eqid = conf->base_eqid;
        dev->max_eq = conf->max_eq;
        return 0;
}
/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mqe *cmd;
        struct ocrdma_fw_ver_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_GET_FW_VER,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_fw_ver_rsp *)cmd;
        memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
        memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
               sizeof(rsp->running_ver));
        ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
        kfree(cmd);
        return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mqe *cmd;
        struct ocrdma_fw_conf_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_GET_FW_CONFIG,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_fw_conf_rsp *)cmd;
        status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
        kfree(cmd);
        return status;
}
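
/* Fetch (and optionally reset) the RDMA statistics block using a
 * non-embedded mailbox command; the previous snapshot is cached so it can
 * be restored if the command fails.
 */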
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
{
        struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
        struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
        struct ocrdma_rdma_stats_resp *old_stats;
        int status;

        old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
        if (old_stats == NULL)
                return -ENOMEM;

        memset(mqe, 0, sizeof(*mqe));
        mqe->hdr.pyld_len = dev->stats_mem.size;
        mqe->hdr.spcl_sge_cnt_emb |=
                        (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
                                OCRDMA_MQE_HDR_SGE_CNT_MASK;
        mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
        mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
        mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;

        /* Cache the old stats */
        memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
        memset(req, 0, dev->stats_mem.size);

        ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
                        OCRDMA_CMD_GET_RDMA_STATS,
                        OCRDMA_SUBSYS_ROCE,
                        dev->stats_mem.size);
        if (reset)
                req->reset_stats = reset;

        status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
        if (status)
                /* Copy from cache, if mbox fails */
                memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
        else
                ocrdma_le32_to_cpu(req, dev->stats_mem.size);

        kfree(old_stats);
        return status;
}
static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_dma_mem dma;
        struct ocrdma_mqe *mqe;
        struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
        struct mgmt_hba_attribs *hba_attribs;

        mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
        if (!mqe)
                return status;

        dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
        dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
                                    dma.size, &dma.pa, GFP_KERNEL);
        if (!dma.va)
                goto free_mqe;

        mqe->hdr.pyld_len = dma.size;
        mqe->hdr.spcl_sge_cnt_emb |=
                        (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
                        OCRDMA_MQE_HDR_SGE_CNT_MASK;
        mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
        mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
        mqe->u.nonemb_req.sge[0].len = dma.size;

        memset(dma.va, 0, dma.size);
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
                        OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
                        OCRDMA_SUBSYS_COMMON,
                        dma.size);

        status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
        if (!status) {
                ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
                hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;

                dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
                                        OCRDMA_HBA_ATTRB_PTNUM_MASK)
                                        >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
                strncpy(dev->model_number,
                        hba_attribs->controller_model_number, 31);
        }
        dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
        kfree(mqe);
        return status;
}
static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mbx_query_config *rsp;
        struct ocrdma_mqe *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
        if (!cmd)
                return status;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_mbx_query_config *)cmd;
        ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
                              u8 *lnk_state)
{
        int status = -ENOMEM;
        struct ocrdma_get_link_speed_rsp *rsp;
        struct ocrdma_mqe *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
                                  sizeof(*cmd));
        if (!cmd)
                return status;
        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;

        rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
        if (lnk_speed)
                *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
                                >> OCRDMA_PHY_PS_SHIFT;
        if (lnk_state)
                *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);

mbx_err:
        kfree(cmd);
        return status;
}
static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        struct ocrdma_mqe *cmd;
        struct ocrdma_get_phy_info_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
        if (!cmd)
                return status;

        ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
                        OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
                        sizeof(*cmd));

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;

        rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
        dev->phy.phy_type =
                        (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
        dev->phy.interface_type =
                        (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
                                >> OCRDMA_IF_TYPE_SHIFT;
        dev->phy.auto_speeds_supported =
                        (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
        dev->phy.fixed_speeds_supported =
                        (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
                                >> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
        kfree(cmd);
        return status;
}
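
/* Protection domain allocation: a single ALLOC_PD mailbox command; the
 * response may also grant a DPP page for the PD.
 */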
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        int status = -ENOMEM;
        struct ocrdma_alloc_pd *cmd;
        struct ocrdma_alloc_pd_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
        if (!cmd)
                return status;
        if (pd->dpp_enabled)
                cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
        pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
        if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
                pd->dpp_enabled = true;
                pd->dpp_page = rsp->dpp_page_pdid >>
                                OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
        } else {
                pd->dpp_enabled = false;
        }
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        int status = -ENOMEM;
        struct ocrdma_dealloc_pd *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->id = pd->id;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
        int status = -ENOMEM;
        size_t pd_bitmap_size;
        struct ocrdma_alloc_pd_range *cmd;
        struct ocrdma_alloc_pd_range_rsp *rsp;

        /* Pre allocate the DPP PDs */
        if (dev->attr.max_dpp_pds) {
                cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
                                          sizeof(*cmd));
                if (!cmd)
                        return -ENOMEM;
                cmd->pd_count = dev->attr.max_dpp_pds;
                cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
                status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
                rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;

                if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
                    rsp->pd_count) {
                        dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
                                        OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
                        dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
                                        OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
                        dev->pd_mgr->max_dpp_pd = rsp->pd_count;
                        pd_bitmap_size =
                                BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
                        dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
                                                             GFP_KERNEL);
                }
                kfree(cmd);
        }

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;

        cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
        if (!status && rsp->pd_count) {
                dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
                                        OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
                dev->pd_mgr->max_normal_pd = rsp->pd_count;
                pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
                dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
                                                      GFP_KERNEL);
        }
        kfree(cmd);

        if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
                /* Enable PD resource manager */
                dev->pd_mgr->pd_prealloc_valid = true;
                return 0;
        }
        return status;
}

static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
{
        struct ocrdma_dealloc_pd_range *cmd;

        /* return normal PDs to firmware */
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
        if (!cmd)
                return;

        if (dev->pd_mgr->max_normal_pd) {
                cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
                cmd->pd_count = dev->pd_mgr->max_normal_pd;
                ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        }

        if (dev->pd_mgr->max_dpp_pd) {
                kfree(cmd);
                /* return DPP PDs to firmware */
                cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
                                          sizeof(*cmd));
                if (!cmd)
                        return;

                cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
                cmd->pd_count = dev->pd_mgr->max_dpp_pd;
                ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        }
        kfree(cmd);
}
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
{
        int status;

        dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
                              GFP_KERNEL);
        if (!dev->pd_mgr) {
                pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
                return;
        }
        status = ocrdma_mbx_alloc_pd_range(dev);
        if (status) {
                pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
                       __func__, dev->id);
        }
}

static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
        ocrdma_mbx_dealloc_pd_range(dev);
        kfree(dev->pd_mgr->pd_norm_bitmap);
        kfree(dev->pd_mgr->pd_dpp_bitmap);
        kfree(dev->pd_mgr);
}
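
/* Round a requested queue size up to a hardware-supported page-count and
 * page-size combination.
 */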
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
                               int *num_pages, int *page_size)
{
        int i;
        int mem_size;

        *num_entries = roundup_pow_of_two(*num_entries);
        mem_size = *num_entries * entry_size;
        /* find the lowest possible multiplier */
        for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
                if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
                        break;
        }
        if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
                return -EINVAL;
        mem_size = roundup(mem_size,
                ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
        *num_pages =
            mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
        *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
        *num_entries = mem_size / entry_size;
        return 0;
}
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
        int i;
        int status = 0;
        int max_ah;
        struct ocrdma_create_ah_tbl *cmd;
        struct ocrdma_create_ah_tbl_rsp *rsp;
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa;
        struct ocrdma_pbe *pbes;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
        if (!cmd)
                return status;

        max_ah = OCRDMA_MAX_AH;
        dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;

        /* number of PBEs in PBL */
        cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
                                OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
                                OCRDMA_CREATE_AH_NUM_PAGES_MASK;

        /* page size */
        for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
                if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
                        break;
        }
        cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
                        OCRDMA_CREATE_AH_PAGE_SIZE_MASK;

        /* AH entry size */
        cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
                                OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
                                OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;

        dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                                &dev->av_tbl.pbl.pa,
                                                GFP_KERNEL);
        if (dev->av_tbl.pbl.va == NULL)
                goto mem_err;

        dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
                                            &pa, GFP_KERNEL);
        if (dev->av_tbl.va == NULL)
                goto mem_err_ah;
        dev->av_tbl.pa = pa;
        dev->av_tbl.num_ah = max_ah;
        memset(dev->av_tbl.va, 0, dev->av_tbl.size);

        pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
        for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
                pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
                pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
                pa += PAGE_SIZE;
        }
        cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
        cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
        dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
        kfree(cmd);
        return 0;

mbx_err:
        dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
                          dev->av_tbl.pa);
        dev->av_tbl.va = NULL;
mem_err_ah:
        dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
                          dev->av_tbl.pbl.pa);
        dev->av_tbl.pbl.va = NULL;
        dev->av_tbl.size = 0;
mem_err:
        kfree(cmd);
        return status;
}

static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
{
        struct ocrdma_delete_ah_tbl *cmd;
        struct pci_dev *pdev = dev->nic_info.pdev;

        if (dev->av_tbl.va == NULL)
                return;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
        if (!cmd)
                return;
        cmd->ahid = dev->av_tbl.ahid;

        ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
                          dev->av_tbl.pa);
        dev->av_tbl.va = NULL;
        dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
                          dev->av_tbl.pbl.pa);
        kfree(cmd);
}
/* Multiple CQs share the EQs. This routine returns the least used EQ to
 * associate with a CQ. This distributes the interrupt processing and CPU
 * load across the associated EQ, vector and hence the CPU.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
{
        int i, selected_eq = 0, cq_cnt = 0;
        u16 eq_id;

        mutex_lock(&dev->dev_lock);
        cq_cnt = dev->eq_tbl[0].cq_cnt;
        eq_id = dev->eq_tbl[0].q.id;
        /* find the EQ which has the least number of
         * CQs associated with it.
         */
        for (i = 0; i < dev->eq_cnt; i++) {
                if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
                        cq_cnt = dev->eq_tbl[i].cq_cnt;
                        eq_id = dev->eq_tbl[i].q.id;
                        selected_eq = i;
                }
        }
        dev->eq_tbl[selected_eq].cq_cnt += 1;
        mutex_unlock(&dev->dev_lock);
        return eq_id;
}

static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
{
        int i;

        mutex_lock(&dev->dev_lock);
        i = ocrdma_get_eq_table_index(dev, eq_id);
        if (i == -EINVAL)
                BUG();
        dev->eq_tbl[i].cq_cnt -= 1;
        mutex_unlock(&dev->dev_lock);
}
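
/* Create a completion queue: allocate DMA memory for the CQEs, bind the CQ
 * to the least loaded EQ and issue CREATE_CQ.
 */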
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                         int entries, int dpp_cq, u16 pd_id)
{
        int status = -ENOMEM; int max_hw_cqe;
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_create_cq *cmd;
        struct ocrdma_create_cq_rsp *rsp;
        u32 hw_pages, cqe_size, page_size, cqe_count;

        if (entries > dev->attr.max_cqe) {
                pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
                       __func__, dev->id, dev->attr.max_cqe, entries);
                return -EINVAL;
        }
        if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
                return -EINVAL;

        if (dpp_cq) {
                cq->max_hw_cqe = 1;
                max_hw_cqe = 1;
                cqe_size = OCRDMA_DPP_CQE_SIZE;
                hw_pages = 1;
        } else {
                cq->max_hw_cqe = dev->attr.max_cqe;
                max_hw_cqe = dev->attr.max_cqe;
                cqe_size = sizeof(struct ocrdma_cqe);
                hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
        }

        cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
        cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
        if (!cq->va) {
                status = -ENOMEM;
                goto mem_err;
        }
        memset(cq->va, 0, cq->len);
        page_size = cq->len / hw_pages;
        cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
                                        OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
        cmd->cmd.pgsz_pgcnt |= hw_pages;
        cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

        cq->eqn = ocrdma_bind_eq(dev);
        cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
        cqe_count = cq->len / cqe_size;
        cq->cqe_cnt = cqe_count;
        if (cqe_count > 1024) {
                /* Set cnt to 3 to indicate more than 1024 cq entries */
                cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
        } else {
                u8 count = 0;

                switch (cqe_count) {
                case 256:
                        count = 0;
                        break;
                case 512:
                        count = 1;
                        break;
                case 1024:
                        count = 2;
                        break;
                default:
                        goto mbx_err;
                }
                cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
        }
        /* shared eq between all the consumer cqs. */
        cmd->cmd.eqn = cq->eqn;
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                if (dpp_cq)
                        cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
                                OCRDMA_CREATE_CQ_TYPE_SHIFT;
                cq->phase_change = false;
                cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
        } else {
                cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
                cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
                cq->phase_change = true;
        }

        /* pd_id valid only for v3 */
        cmd->cmd.pdid_cqecnt |= (pd_id <<
                OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
        ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;

        rsp = (struct ocrdma_create_cq_rsp *)cmd;
        cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
        kfree(cmd);
        return 0;
mbx_err:
        ocrdma_unbind_eq(dev, cq->eqn);
        dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
        kfree(cmd);
        return status;
}
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
        int status = -ENOMEM;
        struct ocrdma_destroy_cq *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
        if (!cmd)
                return status;
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        cmd->bypass_flush_qid |=
                (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
                OCRDMA_DESTROY_CQ_QID_MASK;

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        ocrdma_unbind_eq(dev, cq->eqn);
        dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
        kfree(cmd);
        return status;
}
int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
                          u32 pdid, int addr_check)
{
        int status = -ENOMEM;
        struct ocrdma_alloc_lkey *cmd;
        struct ocrdma_alloc_lkey_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->pdid = pdid;
        cmd->pbl_sz_flags |= addr_check;
        cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
        cmd->pbl_sz_flags |=
            (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
        hwmr->lkey = rsp->lrkey;
mbx_err:
        kfree(cmd);
        return status;
}

int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
        int status = -ENOMEM;
        struct ocrdma_dealloc_lkey *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->lkey = lkey;
        cmd->rsvd_frmr = fr_mr ? 1 : 0;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
                             u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
        int status = -ENOMEM;
        int i;
        struct ocrdma_reg_nsmr *cmd;
        struct ocrdma_reg_nsmr_rsp *rsp;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->num_pbl_pdid =
            pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
        cmd->fr_mr = hwmr->fr_mr;

        cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
                                    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
                                    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
                                    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
                                    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
        cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
                                    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
        cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

        cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
        cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
                                        OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
        cmd->totlen_low = hwmr->len;
        cmd->totlen_high = upper_32_bits(hwmr->len);
        cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
        cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
        cmd->va_loaddr = (u32) hwmr->va;
        cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

        for (i = 0; i < pbl_cnt; i++) {
                cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
                cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
        }
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
                goto mbx_err;
        rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
        hwmr->lkey = rsp->lrkey;
mbx_err:
        kfree(cmd);
        return status;
}

static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
                                  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
                                  u32 pbl_offset, u32 last)
{
        int status = -ENOMEM;
        int i;
        struct ocrdma_reg_nsmr_cont *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        cmd->lrkey = hwmr->lkey;
        cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
            (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
        cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

        for (i = 0; i < pbl_cnt; i++) {
                cmd->pbl[i].lo =
                    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
                cmd->pbl[i].hi =
                    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
        }
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}
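
/* Register a memory region; PBLs beyond MAX_OCRDMA_NSMR_PBL are posted in
 * follow-up REGISTER_NSMR_CONT commands until the last chunk is marked.
 */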
int ocrdma_reg_mr(struct ocrdma_dev *dev,
                  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
        int status;
        u32 last = 0;
        u32 cur_pbl_cnt, pbl_offset;
        u32 pending_pbl_cnt = hwmr->num_pbls;

        pbl_offset = 0;
        cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
        if (cur_pbl_cnt == pending_pbl_cnt)
                last = 1;

        status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
                                   cur_pbl_cnt, hwmr->pbe_size, last);
        if (status) {
                pr_err("%s() status=%d\n", __func__, status);
                return status;
        }
        /* if there are no more pbls to register then exit. */
        if (last)
                return 0;

        while (!last) {
                pbl_offset += cur_pbl_cnt;
                pending_pbl_cnt -= cur_pbl_cnt;
                cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
                /* if we reach the end of the pbls, then we need to set the
                 * last bit, indicating no more pbls to register for this
                 * memory key.
                 */
                if (cur_pbl_cnt == pending_pbl_cnt)
                        last = 1;

                status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
                                                pbl_offset, last);
                if (status)
                        break;
        }
        if (status)
                pr_err("%s() err. status=%d\n", __func__, status);

        return status;
}
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
        struct ocrdma_qp *tmp;
        bool found = false;

        list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
                if (qp == tmp) {
                        found = true;
                        break;
                }
        }
        return found;
}

bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
        struct ocrdma_qp *tmp;
        bool found = false;

        list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
                if (qp == tmp) {
                        found = true;
                        break;
                }
        }
        return found;
}
void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
        bool found;
        unsigned long flags;
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

        spin_lock_irqsave(&dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (!found)
                list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
        if (!qp->srq) {
                found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
                if (!found)
                        list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
        }
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
{
        qp->sq.head = 0;
        qp->sq.tail = 0;
        qp->rq.head = 0;
        qp->rq.tail = 0;
}
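
/* Track the software QP state; entering INIT resets the work queue
 * pointers, entering ERR puts the QP on the CQ flush lists.
 */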
int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
                           enum ib_qp_state *old_ib_state)
{
        unsigned long flags;
        enum ocrdma_qp_state new_state;
        new_state = get_ocrdma_qp_state(new_ib_state);

        /* sync with wqe and rqe posting */
        spin_lock_irqsave(&qp->q_lock, flags);

        if (old_ib_state)
                *old_ib_state = get_ibqp_state(qp->state);
        if (new_state == qp->state) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                return 1;
        }

        if (new_state == OCRDMA_QPS_INIT) {
                ocrdma_init_hwq_ptr(qp);
                ocrdma_del_flush_qp(qp);
        } else if (new_state == OCRDMA_QPS_ERR) {
                ocrdma_flush_qp(qp);
        }

        qp->state = new_state;

        spin_unlock_irqrestore(&qp->q_lock, flags);
        return 0;
}
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
{
	u32 flags = 0;

	if (qp->cap_flags & OCRDMA_QP_INB_RD)
		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_INB_WR)
		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
	if (qp->cap_flags & OCRDMA_QP_LKEY0)
		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
	return flags;
}
static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_wqe_allocated;
	u32 max_sges = attrs->cap.max_send_sge;

	/* QP1 may exceed 127 */
	max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
				  dev->attr.max_wqe);

	status = ocrdma_build_q_conf(&max_wqe_allocated,
				     dev->attr.wqe_size, &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_send_wr=0x%x\n", __func__,
		       max_wqe_allocated);
		return -EINVAL;
	}
	qp->sq.max_cnt = max_wqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->sq.va)
		return -EINVAL;
	memset(qp->sq.va, 0, len);
	qp->sq.len = len;
	qp->sq.pa = pa;
	qp->sq.entry_size = dev->attr.wqe_size;
	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);

	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
			       << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |= (hw_pages <<
				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
				OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
			     OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
	return 0;
}
static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa = 0;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;

	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
		       attrs->cap.max_recv_wr + 1);
		return -EINVAL;
	}
	qp->rq.max_cnt = max_rqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->rq.va)
		return -ENOMEM;
	memset(qp->rq.va, 0, len);
	qp->rq.pa = pa;
	qp->rq.len = len;
	qp->rq.entry_size = dev->attr.rqe_size;

	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
			       OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |=
	    (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
			      OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
			     OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
	return 0;
}
static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
					 struct ocrdma_pd *pd,
					 struct ocrdma_qp *qp,
					 u8 enable_dpp_cq, u16 dpp_cq_id)
{
	pd->num_dpp_qp--;
	qp->dpp_enabled = true;
	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
	if (!enable_dpp_cq)
		return;
	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
	cmd->dpp_credits_cqid = dpp_cq_id;
	cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
				 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
}
static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
					struct ocrdma_qp *qp)
{
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa = 0;
	int ird_page_size = dev->attr.ird_page_size;
	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
	struct ocrdma_hdr_wqe *rqe;
	int i = 0;

	if (dev->attr.ird == 0)
		return 0;

	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
					  &pa, GFP_KERNEL);
	if (!qp->ird_q_va)
		return -ENOMEM;
	memset(qp->ird_q_va, 0, ird_q_len);
	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
			     pa, ird_page_size);
	for (; i < ird_q_len / dev->attr.rqe_size; i++) {
		rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
						(i * dev->attr.rqe_size));
		rqe->cw = 0;
		rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
	}
	return 0;
}
static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
				     struct ocrdma_qp *qp,
				     struct ib_qp_init_attr *attrs,
				     u16 *dpp_offset, u16 *dpp_credit_lmt)
{
	u32 max_wqe_allocated, max_rqe_allocated;

	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
	qp->dpp_enabled = false;
	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
		qp->dpp_enabled = true;
		*dpp_credit_lmt = (rsp->dpp_response &
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
				  OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
		*dpp_offset = (rsp->dpp_response &
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
			      OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
	}
	max_wqe_allocated =
		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
	max_wqe_allocated = 1 << max_wqe_allocated;
	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

	qp->sq.max_cnt = max_wqe_allocated;
	qp->sq.max_wqe_idx = max_wqe_allocated - 1;

	if (!attrs->srq) {
		qp->rq.max_cnt = max_rqe_allocated;
		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
	}
}
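
/*
 * Build and post the CREATE_QP mailbox command: pick the hardware QP type
 * from the IB QP type, describe the SQ (and either the RQ or the SRQ),
 * optionally request inline-data DPP resources from the PD, attach the
 * send/recv CQ ids, and decode the firmware response into the ocrdma_qp.
 * On failure the DMA memory allocated for the SQ/RQ is freed again.
 */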
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt)
{
	int status = -ENOMEM;
	u32 flags = 0;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_cq *cq;
	struct ocrdma_create_qp_req *cmd;
	struct ocrdma_create_qp_rsp *rsp;
	int qptype;

	switch (attrs->qp_type) {
	case IB_QPT_GSI:
		qptype = OCRDMA_QPT_GSI;
		break;
	case IB_QPT_RC:
		qptype = OCRDMA_QPT_RC;
		break;
	case IB_QPT_UD:
		qptype = OCRDMA_QPT_UD;
		break;
	default:
		return -EINVAL;
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
			      OCRDMA_CREATE_QP_REQ_QPT_MASK;
	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);

		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
		qp->srq = srq;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
			      OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
			   OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	qp->sq_cq = cq;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
			   OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
	qp->rq_cq = cq;

	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);
	}

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	if (qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	pr_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	pr_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}
int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			struct ocrdma_qp_params *param)
{
	int status = -ENOMEM;
	struct ocrdma_query_qp *cmd;
	struct ocrdma_query_qp_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_query_qp_rsp *)cmd;
	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
	kfree(cmd);
	return status;
}
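
/*
 * Translate the address-vector portion of ib_qp_attr into MODIFY_QP
 * parameters: traffic class, flow label, SL, hop limit, the SGID/DGID
 * (rewritten as 4-byte IPv4 addresses for RoCEv2 IPv4 GIDs), the resolved
 * destination MAC, and the VLAN/PFC related fields.
 */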
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs,
				int attr_mask)
{
	int status;
	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
	union ib_gid sgid, zgid;
	struct ib_gid_attr sgid_attr;
	u32 vlan_id = 0xFFFF;
	u8 mac_addr[6], hdr_type;
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
		return -EINVAL;
	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);
	cmd->params.tclass_sq_psn |=
	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
	cmd->params.hop_lmt_rq_psn |=
	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;

	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
	       sizeof(cmd->params.dgid));

	status = ib_get_cached_gid(&dev->ibdev, 1, ah_attr->grh.sgid_index,
				   &sgid, &sgid_attr);
	if (!status && sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		memcpy(mac_addr, sgid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(sgid_attr.ndev);
	}

	memset(&zgid, 0, sizeof(zgid));
	if (!memcmp(&sgid, &zgid, sizeof(zgid)))
		return -EINVAL;

	qp->sgid_idx = ah_attr->grh.sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
	if (status)
		return status;
	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
				    (mac_addr[2] << 16) | (mac_addr[3] << 24);

	hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
	if (hdr_type == RDMA_NETWORK_IPV4) {
		rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
		memcpy(&cmd->params.dgid[0],
		       &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
		memcpy(&cmd->params.sgid[0],
		       &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
	}
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));

	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);

	if (vlan_id == 0xFFFF)
		vlan_id = 0;
	if (vlan_id || dev->pfc_state) {
		if (!vlan_id) {
			pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
			       dev->id);
			pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
			       dev->id);
		}
		cmd->params.vlan_dmac_b4_to_b5 |=
		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
		cmd->params.rnt_rc_sl_fl |=
		    (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
	}
	cmd->params.max_sge_recv_flags |= ((hdr_type <<
					    OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
					   OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
	return 0;
}
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask)
{
	int status = 0;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
						   OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV) {
		status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
		if (status)
			return status;
	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
					    (dev->nic_info.mac_addr[1] << 8) |
					    (dev->nic_info.mac_addr[2] << 16) |
					    (dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
						 (dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
		    OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
							OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		if (attrs->path_mtu < IB_MTU_512 ||
		    attrs->path_mtu > IB_MTU_4096) {
			pr_err("ocrdma%d: IB MTU %d is not supported\n",
			       dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
			return -EINVAL;
		}
		cmd->params.path_mtu_pkey_indx |=
		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
					     OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
					    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
					     OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
					    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
							OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
						       & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp)
			return -EINVAL;
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp)
			return -EINVAL;
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				   OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				  (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);

	return status;
}
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	cmd->flags = 0;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
		    (get_ocrdma_qp_state(attrs->qp_state) <<
		     OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else {
		cmd->params.max_sge_recv_flags |=
		    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
	}

	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
mbx_err:
	kfree(cmd);
	return status;
}
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);

	if (qp->sq.va)
		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
	return status;
}
int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
			  struct ib_srq_init_attr *srq_attr,
			  struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	int hw_pages, hw_page_size;
	int len;
	struct ocrdma_create_srq_rsp *rsp;
	struct ocrdma_create_srq *cmd;
	dma_addr_t pa;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
	max_rqe_allocated = srq_attr->attr.max_wr + 1;
	status = ocrdma_build_q_conf(&max_rqe_allocated,
				     dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_wr=0x%x\n", __func__,
		       srq_attr->attr.max_wr);
		status = -EINVAL;
		goto ret;
	}
	len = hw_pages * hw_page_size;
	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!srq->rq.va) {
		status = -ENOMEM;
		goto ret;
	}
	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);

	srq->rq.entry_size = dev->attr.rqe_size;
	srq->rq.pa = pa;
	srq->rq.len = len;
	srq->rq.max_cnt = max_rqe_allocated;

	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
			    OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;

	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
			   << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
	cmd->pages_rqe_sz |= (dev->attr.rqe_size
			      << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
			     & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_srq_rsp *)cmd;

	srq->rq.dbid = rsp->id;
	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
			      OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
			     OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
	max_rqe_allocated = (1 << max_rqe_allocated);
	srq->rq.max_cnt = max_rqe_allocated;
	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
			    OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
			   OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
	goto ret;
mbx_err:
	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
ret:
	kfree(cmd);
	return status;
}
int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_modify_srq *cmd;
	struct ocrdma_pd *pd = srq->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	cmd->limit_max_rqe |= srq_attr->srq_limit <<
			      OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_query_srq *cmd;
	struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->rq.dbid;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status == 0) {
		struct ocrdma_query_srq_rsp *rsp =
		    (struct ocrdma_query_srq_rsp *)cmd;

		srq_attr->max_sge =
		    rsp->srq_lmt_max_sge &
		    OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
		srq_attr->max_wr =
		    rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
		    OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
	}
	kfree(cmd);
	return status;
}
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
	return status;
}
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
				      struct ocrdma_dcbx_cfg *dcbxcfg)
{
	int status;
	dma_addr_t pa;
	struct ocrdma_mqe cmd;

	struct ocrdma_get_dcbx_cfg_req *req = NULL;
	struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;

	memset(&cmd, 0, sizeof(struct ocrdma_mqe));
	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
				 sizeof(struct ocrdma_get_dcbx_cfg_req));
	req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
	if (!req) {
		status = -ENOMEM;
		goto mem_err;
	}

	cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				    OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
	mqe_sge->pa_hi = (u32) upper_32_bits(pa);
	mqe_sge->len = cmd.hdr.pyld_len;

	memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
	ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
			OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
	req->param_type = ptype;

	status = ocrdma_mbx_cmd(dev, &cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
	ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
	memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));

mbx_err:
	dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
mem_err:
	return status;
}
#define OCRDMA_MAX_SERVICE_LEVEL_INDEX	0x08
#define OCRDMA_DEFAULT_SERVICE_LEVEL	0x05
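
/*
 * Walk the DCBX application-priority table returned by firmware and pick the
 * service level to use for RoCE traffic. ocrdma_init_service_level() below
 * queries the operational configuration first, falls back to the admin
 * configuration, and finally to OCRDMA_DEFAULT_SERVICE_LEVEL.
 */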
static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
				    struct ocrdma_dcbx_cfg *dcbxcfg,
				    u8 *srvc_lvl)
{
	int status = -EINVAL, indx, slindx;
	int ventry_cnt;
	struct ocrdma_app_parameter *app_param;
	u8 valid, proto_sel;
	u8 app_prio, pfc_prio;
	u16 proto;

	if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
		pr_info("%s ocrdma%d DCBX is disabled\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
		goto out;
	}

	if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
		pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			(ptype > 0 ? "operational" : "admin"),
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
			"enabled" : "disabled",
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
			"" : ", not sync'ed");
		goto out;
	} else {
		pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	}

	ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
		      OCRDMA_DCBX_APP_ENTRY_SHIFT)
		     & OCRDMA_DCBX_STATE_MASK;

	for (indx = 0; indx < ventry_cnt; indx++) {
		app_param = &dcbxcfg->app_param[indx];
		valid = (app_param->valid_proto_app >>
			 OCRDMA_APP_PARAM_VALID_SHIFT)
			& OCRDMA_APP_PARAM_VALID_MASK;
		proto_sel = (app_param->valid_proto_app
			     >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
			    & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
		proto = app_param->valid_proto_app &
			OCRDMA_APP_PARAM_APP_PROTO_MASK;

		if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
			for (slindx = 0; slindx <
			     OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
				app_prio = ocrdma_get_app_prio(
						(u8 *)app_param->app_prio,
						slindx);
				pfc_prio = ocrdma_get_pfc_prio(
						(u8 *)dcbxcfg->pfc_prio,
						slindx);

				if (app_prio && pfc_prio) {
					*srvc_lvl = slindx;
					status = 0;
					goto out;
				}
			}
			if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
				pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
					dev_name(&dev->nic_info.pdev->dev),
					dev->id, proto);
			}
		}
	}

out:
	return status;
}
void ocrdma_init_service_level(struct ocrdma_dev *dev)
{
	int status = 0, indx;
	struct ocrdma_dcbx_cfg dcbxcfg;
	u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
	int ptype = OCRDMA_PARAMETER_TYPE_OPER;

	/* try the operational config first, then retry with the admin config */
	for (indx = 0; indx < 2; indx++) {
		status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
		if (status) {
			pr_err("%s(): status=%d\n", __func__, status);
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
			continue;
		}

		status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
						  &dcbxcfg, &srvc_lvl);
		if (status)
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
		else
			break;
	}

	if (status)
		pr_info("%s ocrdma%d service level default\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	else
		pr_info("%s ocrdma%d service level %d\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			srvc_lvl);

	dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
	dev->sl = srvc_lvl;
}
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	int i;
	int status = -EINVAL;
	struct ocrdma_av *av;
	unsigned long flags;

	av = dev->av_tbl.va;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	for (i = 0; i < dev->av_tbl.num_ah; i++) {
		if (av->valid == 0) {
			av->valid = OCRDMA_AV_VALID;
			ah->av = av;
			ah->id = i;
			status = 0;
			break;
		}
		av++;
	}
	if (i == dev->av_tbl.num_ah)
		status = -EAGAIN;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return status;
}
int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return 0;
}
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
		 dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}

	if (!num_eq)
		return -EINVAL;

	dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
	if (!dev->eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status) {
			status = -EINVAL;
			break;
		}
		sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->eq_tbl[i].irq_name,
				     &dev->eq_tbl[i]);
		if (status)
			goto done;
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	return 0;
done:
	ocrdma_destroy_eqs(dev);
	return status;
}
static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
				 int num)
{
	int i, status = -ENOMEM;
	struct ocrdma_modify_eqd_req *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
	if (!cmd)
		return status;

	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->cmd.num_eq = num;
	for (i = 0; i < num; i++) {
		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
		cmd->cmd.set_eqd[i].phase = 0;
		cmd->cmd.set_eqd[i].delay_multiplier =
				(eq[i].aic_obj.prev_eqd * 65) / 100;
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
			     int num)
{
	int num_eqs, i = 0;

	/* the MODIFY_EQ_DELAY command carries at most 8 EQs per mailbox call */
	if (num > 8) {
		while (num) {
			num_eqs = min(num, 8);
			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
	} else {
		ocrdma_mbx_modify_eqd(dev, eq, num);
	}
	return 0;
}
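
/*
 * Periodic (one second) adaptive interrupt-coalescing worker: for each EQ,
 * compare the interrupt count accumulated over the last interval against the
 * high/low thresholds, flip the EQ delay between EQ_AIC_MIN_EQD and
 * EQ_AIC_MAX_EQD as needed, and push the updated delays to firmware with
 * MODIFY_EQ_DELAY.
 */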
void ocrdma_eqd_set_task(struct work_struct *work)
{
	struct ocrdma_dev *dev =
		container_of(work, struct ocrdma_dev, eqd_work.work);
	struct ocrdma_eq *eq = NULL;
	int i, num = 0, status = -EINVAL;
	u64 eq_intr;

	for (i = 0; i < dev->eq_cnt; i++) {
		eq = &dev->eq_tbl[i];
		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
			eq_intr = eq->aic_obj.eq_intr_cnt -
				  eq->aic_obj.prev_eq_intr_cnt;
			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
				num++;
			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
				num++;
			}
		}
		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
	}

	if (num)
		status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;

	/* create the eqs */
	status = ocrdma_create_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_get_phy_info(dev);
	if (status)
		goto info_attrb_err;
	status = ocrdma_mbx_get_ctrl_attribs(dev);
	if (status)
		goto info_attrb_err;

	return 0;

info_attrb_err:
	ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_eqs(dev);
qpeq_err:
	pr_err("%s() status=%d\n", __func__, status);
	return status;
}
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_free_pd_pool(dev);
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);

	/* cleanup the eqs */
	ocrdma_destroy_eqs(dev);
}