/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) CNA Adapters.
 * Copyright (C) 2008-2012 Emulex.  All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID.  See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
enum mbx_status {
	OCRDMA_MBX_STATUS_FAILED		= 1,
	OCRDMA_MBX_STATUS_ILLEGAL_FIELD		= 3,
	OCRDMA_MBX_STATUS_OOR			= 100,
	OCRDMA_MBX_STATUS_INVALID_PD		= 101,
	OCRDMA_MBX_STATUS_PD_INUSE		= 102,
	OCRDMA_MBX_STATUS_INVALID_CQ		= 103,
	OCRDMA_MBX_STATUS_INVALID_QP		= 104,
	OCRDMA_MBX_STATUS_INVALID_LKEY		= 105,
	OCRDMA_MBX_STATUS_ORD_EXCEEDS		= 106,
	OCRDMA_MBX_STATUS_IRD_EXCEEDS		= 107,
	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS	= 108,
	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS	= 109,
	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS	= 110,
	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS	= 111,
	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS	= 112,
	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE	= 113,
	OCRDMA_MBX_STATUS_MW_BOUND		= 114,
	OCRDMA_MBX_STATUS_INVALID_VA		= 115,
	OCRDMA_MBX_STATUS_INVALID_LENGTH	= 116,
	OCRDMA_MBX_STATUS_INVALID_FBO		= 117,
	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS	= 118,
	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE	= 119,
	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY	= 120,
	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT	= 121,
	OCRDMA_MBX_STATUS_INVALID_SRQ_ID	= 129,
	OCRDMA_MBX_STATUS_SRQ_ERROR		= 133,
	OCRDMA_MBX_STATUS_RQE_EXCEEDS		= 134,
	OCRDMA_MBX_STATUS_MTU_EXCEEDS		= 135,
	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS	= 136,
	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS	= 137,
	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS	= 138,
	OCRDMA_MBX_STATUS_QP_BOUND		= 130,
	OCRDMA_MBX_STATUS_INVALID_CHANGE	= 139,
	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP	= 140,
	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER	= 141,
	OCRDMA_MBX_STATUS_MW_STILL_BOUND	= 142,
	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID	= 143,
	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS	= 144
};

enum additional_status {
	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES	= 1,
	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER		= 2,
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES	= 3,
	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING		= 4,
	OCRDMA_MBX_CQE_STATUS_DMA_FAILED		= 5
};
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
	return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}
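/*
 * The queue head/tail indices in these helpers wrap with a bitwise AND,
 * which assumes OCRDMA_EQ_LEN, OCRDMA_MQ_CQ_LEN and OCRDMA_MQ_LEN are
 * powers of two: for example, if OCRDMA_EQ_LEN were 4096, tail 4095 + 1
 * would wrap back to 0.
 */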
static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
		((u8 *) dev->mq.cq.va +
		 (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
		return NULL;
	return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
	return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
				     (dev->mq.sq.head *
				      sizeof(struct ocrdma_mqe)));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
	atomic_inc(&dev->mq.sq.used);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
	return (void *)((u8 *) dev->mq.sq.va +
			(dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
}
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
	switch (qps) {
	case OCRDMA_QPS_INIT:
		return IB_QPS_INIT;
	case OCRDMA_QPS_SQ_DRAINING:
		return IB_QPS_SQD;
	}
	return IB_QPS_ERR;
}
static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
	switch (qps) {
	case IB_QPS_RESET:
		return OCRDMA_QPS_RST;
	case IB_QPS_INIT:
		return OCRDMA_QPS_INIT;
	case IB_QPS_RTR:
		return OCRDMA_QPS_RTR;
	case IB_QPS_RTS:
		return OCRDMA_QPS_RTS;
	case IB_QPS_SQD:
		return OCRDMA_QPS_SQD;
	case IB_QPS_SQE:
		return OCRDMA_QPS_SQE;
	case IB_QPS_ERR:
		return OCRDMA_QPS_ERR;
	}
	return OCRDMA_QPS_ERR;
}
static int ocrdma_get_mbx_errno(u32 status)
{
	int err_num = -EFAULT;
	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
					OCRDMA_MBX_RSP_STATUS_SHIFT;
	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
					OCRDMA_MBX_RSP_ASTATUS_SHIFT;

	switch (mbox_status) {
	case OCRDMA_MBX_STATUS_OOR:
	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
		err_num = -EAGAIN;
		break;

	case OCRDMA_MBX_STATUS_INVALID_PD:
	case OCRDMA_MBX_STATUS_INVALID_CQ:
	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
	case OCRDMA_MBX_STATUS_INVALID_QP:
	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
	case OCRDMA_MBX_STATUS_INVALID_LKEY:
	case OCRDMA_MBX_STATUS_INVALID_VA:
	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
	case OCRDMA_MBX_STATUS_INVALID_FBO:
	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
	case OCRDMA_MBX_STATUS_SRQ_ERROR:
	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
		err_num = -EINVAL;
		break;

	case OCRDMA_MBX_STATUS_PD_INUSE:
	case OCRDMA_MBX_STATUS_QP_BOUND:
	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
	case OCRDMA_MBX_STATUS_MW_BOUND:
		err_num = -EBUSY;
		break;

	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
		err_num = -ENOBUFS;
		break;

	case OCRDMA_MBX_STATUS_FAILED:
		switch (add_status) {
		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
			err_num = -EAGAIN;
			break;
		}
	default:
		err_num = -EFAULT;
	}
	return err_num;
}
static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
	int err_num = -EINVAL;

	switch (cqe_status) {
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
		err_num = -EPERM;
		break;
	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
		err_num = -EAGAIN;
		break;
	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
		err_num = -EIO;
		break;
	}
	return err_num;
}
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped)
{
	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (armed)
		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
	if (solicited)
		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}
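/*
 * Doorbell layout sketch, read from the OCRDMA_DB_CQ_* shift/mask macros
 * used above rather than from hardware documentation: the low bits carry
 * the CQ id, the extended id bits are folded in at
 * OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT, one bit re-arms the CQ, one bit
 * requests solicited-only events, and the popped-CQE count tells the
 * device how many entries the driver has consumed.
 */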
static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
	u32 val = 0;

	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
			      bool arm, bool clear_int, u16 num_eqe)
{
	u32 val = 0;

	val |= eq_id & OCRDMA_EQ_ID_MASK;
	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);

	if (arm)
		val |= (1 << OCRDMA_REARM_SHIFT);
	if (clear_int)
		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
			    u8 opcode, u8 subsys, u32 cmd_len)
{
	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
	cmd_hdr->timeout = 20; /* seconds */
	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}
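/*
 * Every mailbox request built below starts with this header: the subsystem
 * and opcode packed into subsys_op, a timeout in seconds (20 here), and a
 * command length that excludes the header itself.
 */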
static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
	struct ocrdma_mqe *mqe;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return NULL;
	mqe->hdr.spcl_sge_cnt_emb |=
		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
		OCRDMA_MQE_HDR_EMB_MASK;
	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
			cmd_len);
	return mqe;
}

static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
	memset(q, 0, sizeof(*q));
	q->entry_size = entry_size;
	q->size = len * entry_size;
	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
				   &q->dma, GFP_KERNEL);
	if (!q->va)
		return -ENOMEM;
	memset(q->va, 0, q->size);
	return 0;
}
static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
				 dma_addr_t host_pa, int hw_page_size)
{
	int i;

	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
		q_pa[i].hi = (u32) upper_32_bits(host_pa);
		host_pa += hw_page_size;
	}
}
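/*
 * ocrdma_build_q_pages() splits one physically contiguous DMA region into
 * hw_page_size chunks and records each chunk's bus address as a lo/hi
 * 32-bit pair; this is the page-list format carried by the mailbox
 * commands below.  For example, a 16 KB queue with 4 KB hardware pages
 * produces four entries.
 */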
static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
				       struct ocrdma_eq *eq)
{
	/* assign vector and update vector id for next EQ */
	eq->vector = dev->nic_info.msix.start_vector;
	dev->nic_info.msix.start_vector += 1;
}

static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
{
	/* this assumes that EQs are freed in exactly reverse order
	 * as their allocation.
	 */
	dev->nic_info.msix.start_vector -= 1;
}

static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
			       int queue_type)
{
	u8 opcode = 0;
	int status;
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
	case QTYPE_MCCQ:
		opcode = OCRDMA_CMD_DELETE_MQ;
		break;
	case QTYPE_CQ:
		opcode = OCRDMA_CMD_DELETE_CQ;
		break;
	case QTYPE_EQ:
		opcode = OCRDMA_CMD_DELETE_EQ;
		break;
	}
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	return status;
}
static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int status;
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
		cmd->req.rsvd_version = 0;
	else
		cmd->req.rsvd_version = 2;

	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
			     PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
				 NULL);
	if (!status) {
		eq->q.id = rsp->vector_eqid & 0xffff;
		if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
			ocrdma_assign_eq_vect_gen2(dev, eq);
		else {
			eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
			dev->nic_info.msix.start_vector += 1;
		}
		eq->q.created = true;
	}
	return status;
}
static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
{
	int status;

	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	if (status)
		return status;

	status = ocrdma_mbx_create_eq(dev, eq);
	if (status)
		goto mbx_err;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return 0;
mbx_err:
	ocrdma_free_q(dev, &eq->q);
	return status;
}

static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
	else
		irq = dev->nic_info.msix.vector_list[eq->vector];
	return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
		ocrdma_free_eq_vect_gen2(dev);
	ocrdma_free_q(dev, &eq->q);
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	/* disarm EQ so that interrupts are not generated
	 * during freeing and EQ delete is in progress.
	 */
	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

	irq = ocrdma_get_irq(dev, eq);
	free_irq(irq, eq);
	_ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
{
	int i;

	/* deallocate the data path eqs */
	for (i = 0; i < dev->eq_cnt; i++)
		ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
}
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
				   struct ocrdma_queue_info *cq,
				   struct ocrdma_queue_info *eq)
{
	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
	int status;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
	cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);

	ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
			     cq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status)
		cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
	return status;
}

static u32 ocrdma_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
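/*
 * Worked example (illustrative, not from the original source): fls()
 * returns the index of the highest set bit, so a 128-entry queue encodes
 * as fls(128) = 8 and a 256-entry queue as 9.  The special-casing of 16
 * presumably reflects a ring-size field that is only 4 bits wide, so the
 * largest size wraps to an encoding of 0.
 */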
static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
				struct ocrdma_queue_info *mq,
				struct ocrdma_queue_info *cq)
{
	int num_pages, status;
	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
	struct ocrdma_pa *pa;

	memset(cmd, 0, sizeof(*cmd));
	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ,
				OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
		cmd->v0.pages = num_pages;
		cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
		cmd->v0.async_cqid_valid = (cq->id << 1);
		cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
					  OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
		cmd->v0.cqid_ringsize |=
			(cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT);
		cmd->v0.valid = OCRDMA_CREATE_MQ_VALID;
		pa = &cmd->v0.pa[0];
	} else {
		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
				OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
		cmd->req.rsvd_version = 1;
		cmd->v1.cqid_pages = num_pages;
		cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
		cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
		cmd->v1.async_event_bitmap = Bit(20);
		cmd->v1.async_cqid_ringsize = cq->id;
		cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
					OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
		cmd->v1.valid = OCRDMA_CREATE_MQ_VALID;
		pa = &cmd->v1.pa[0];
	}
	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		mq->id = rsp->id;
		mq->created = true;
	}
	return status;
}
static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
	int status;

	/* Alloc completion queue for Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
				sizeof(struct ocrdma_mcqe));
	if (status)
		goto alloc_err;

	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
	if (status)
		goto mbx_cq_free;

	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
	mutex_init(&dev->mqe_ctx.lock);

	/* Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	if (status)
		goto mbx_cq_destroy;
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	if (status)
		goto mbx_q_free;
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
	return status;

mbx_q_free:
	ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}
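/*
 * Bring-up order used above: allocate and create the mailbox queue's
 * completion queue first, initialise the mqe_ctx wait queue and lock, then
 * allocate and create the mailbox queue itself, and finally arm the MQ-CQ
 * doorbell so completions start generating events.  Failures unwind in the
 * reverse order.
 */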
static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
	ocrdma_free_q(dev, mbxq);
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
	ocrdma_free_q(dev, cq);
}

static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
}
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt;
	int cq_event = 0;
	int qp_event = 1;
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
		OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

	ib_evt.device = &dev->ibdev;

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
		srq_event = 1;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
						     qp->srq->ibsrq.srq_context);
	} else if (dev_event)
		ib_dispatch_event(&ib_evt);
}
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	if (evt_code == OCRDMA_ASYNC_EVE_CODE)
		ocrdma_dispatch_ibevent(dev, cqe);
	else
		ocrdma_err("%s(%d) invalid evt code=0x%x\n",
			   __func__, dev->id, evt_code);
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
			OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
			(cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
			>> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
	} else
		ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
			   __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	u16 cqe_popped = 0;
	struct ocrdma_mcqe *cqe;

	while (1) {
		cqe = ocrdma_get_mcqe(dev);
		if (cqe == NULL)
			break;
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
		cqe_popped += 1;
		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		else
			ocrdma_err("%s() cqe->compl is not set.\n", __func__);
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_qp *qp;
	bool buddy_cq_found = false;
	/* Go through list of QPs in error state which are using this CQ
	 * and invoke its callback handler to trigger CQE processing for
	 * error/flushed CQE. It is rare to find more than few entries in
	 * this list as most consumers stops after getting error CQE.
	 * List is traversed only once when a matching buddy cq found for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
		/* if wq and rq share the same cq, than comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if completion came on sq, rq's cq is buddy cq.
		 * if completion came on rq, sq's cq is buddy cq.
		 */
		if (qp->sq_cq == cq)
			cq = qp->rq_cq;
		else
			cq = qp->sq_cq;
		buddy_cq_found = true;
		break;
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	if (buddy_cq_found == false)
		return;
	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
}
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)
		return;

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL) {
		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
		return;
	}
	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->solicited = false;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	ocrdma_ring_cq_db(dev, cq->id, false, false, 0);

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
	else
		ocrdma_qp_cq_handler(dev, cq_id);
}
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	u16 eqe_popped = 0;
	u16 cq_id;

	while (1) {
		ptr = ocrdma_get_eqe(eq);
		eqe = *ptr;
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
			break;
		eqe_popped += 1;
		/* check whether its CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		}
		ocrdma_eq_inc_tail(eq);
	}
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
	/* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}
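/*
 * Interrupt path summary: the handler drains the EQ until it reaches an
 * entry without the valid bit, dispatching each entry to the MQ-CQ or a
 * QP CQ handler, and then rings the EQ doorbell with the number of popped
 * entries (re-arm + clear).  The extra doorbell write with num_popped = 0
 * in INTx mode re-enables interrupts, per the comment above.
 */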
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	int status;

	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;
	else
		return -1;
}
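/*
 * Mailbox command protocol, as implemented by ocrdma_post_mqe() and
 * ocrdma_wait_mqe_cmpl(): the caller's command is copied into the current
 * MQ slot, tagged with the slot index, and the MQ doorbell is rung; the
 * completion interrupt matches that tag in ocrdma_process_mcqe() and wakes
 * the waiter, which then reads the response out of the same slot.  The
 * wait is bounded at 30 seconds.
 */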
/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp;

	mutex_lock(&dev->mqe_ctx.lock);
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp = ocrdma_get_mqe_rsp(dev);
	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
	if (cqe_status || ext_status) {
		ocrdma_err
		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
		     __func__,
		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
		     OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	attr->max_pd =
	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->max_qp =
	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
			     OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ~0ull;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
	    OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
	    OCRDMA_WQE_STRIDE;
	attr->max_inline_data =
	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
			      sizeof(struct ocrdma_sge));
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	}
	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
	    OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
	    OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
	return 0;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		pd->dpp_enabled = false;
	}
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
			       int *num_pages, int *page_size)
{
	int i;
	int mem_size;

	*num_entries = roundup_pow_of_two(*num_entries);
	mem_size = *num_entries * entry_size;
	/* find the possible lowest possible multiplier */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
			break;
	}
	if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
		return -EINVAL;
	mem_size = roundup(mem_size,
		((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
	*num_pages =
	    mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*num_entries = mem_size / entry_size;
	return 0;
}
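/*
 * ocrdma_build_q_conf() rounds the requested entry count up to a power of
 * two, searches for the smallest supported queue page configuration that
 * can hold the resulting memory footprint, and returns the page count and
 * page size to use; the entry count is then recomputed from the rounded-up
 * memory size.
 */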
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
	int i;
	int status = 0;
	int max_ah;
	struct ocrdma_create_ah_tbl *cmd;
	struct ocrdma_create_ah_tbl_rsp *rsp;
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa;
	struct ocrdma_pbe *pbes;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
	if (!cmd)
		return status;

	max_ah = OCRDMA_MAX_AH;
	dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;

	/* number of PBEs in PBL */
	cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
				OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
				OCRDMA_CREATE_AH_NUM_PAGES_MASK;

	/* page size */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
			break;
	}
	cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
				OCRDMA_CREATE_AH_PAGE_SIZE_MASK;

	/* AH entry size */
	cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
				OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
				OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;

	dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						&dev->av_tbl.pbl.pa,
						GFP_KERNEL);
	if (dev->av_tbl.pbl.va == NULL)
		goto mem_err;

	dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
					    &pa, GFP_KERNEL);
	if (dev->av_tbl.va == NULL)
		goto mem_err_ah;
	dev->av_tbl.pa = pa;
	dev->av_tbl.num_ah = max_ah;
	memset(dev->av_tbl.va, 0, dev->av_tbl.size);

	pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
	for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
		pbes[i].pa_lo = (u32) (pa & 0xffffffff);
		pbes[i].pa_hi = (u32) upper_32_bits(pa);
		pa += PAGE_SIZE;
	}
	cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
	cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
	dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
	kfree(cmd);
	return 0;

mbx_err:
	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
			  dev->av_tbl.pa);
	dev->av_tbl.va = NULL;
mem_err_ah:
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
	dev->av_tbl.pbl.va = NULL;
	dev->av_tbl.size = 0;
mem_err:
	kfree(cmd);
	return status;
}

static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
{
	struct ocrdma_delete_ah_tbl *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	if (dev->av_tbl.va == NULL)
		return;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
	if (!cmd)
		return;
	cmd->ahid = dev->av_tbl.ahid;

	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
			  dev->av_tbl.pa);
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
	kfree(cmd);
}
/* Multiple CQs uses the EQ. This routine returns least used
 * EQ to associate with CQ. This will distributes the interrupt
 * processing and CPU load to associated EQ, vector and so to that CPU.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
{
	int i, selected_eq = 0, cq_cnt = 0;
	u16 eq_id;

	mutex_lock(&dev->dev_lock);
	cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
	eq_id = dev->qp_eq_tbl[0].q.id;
	/* find the EQ which is has the least number of
	 * CQs associated with it.
	 */
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
			cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
			eq_id = dev->qp_eq_tbl[i].q.id;
			selected_eq = i;
		}
	}
	dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
	mutex_unlock(&dev->dev_lock);
	return eq_id;
}
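/*
 * EQ selection note: each CQ is bound to the event queue currently serving
 * the fewest CQs (a linear scan of qp_eq_tbl under dev_lock), and
 * ocrdma_unbind_eq() below drops that per-EQ reference count again when the
 * CQ is destroyed, keeping interrupt load roughly balanced across vectors.
 */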
static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
{
	int i;

	mutex_lock(&dev->dev_lock);
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->qp_eq_tbl[i].q.id != eq_id)
			continue;
		dev->qp_eq_tbl[i].cq_cnt -= 1;
		break;
	}
	mutex_unlock(&dev->dev_lock);
}

int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
			 int entries, int dpp_cq)
{
	int status = -ENOMEM; int max_hw_cqe;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_create_cq *cmd;
	struct ocrdma_create_cq_rsp *rsp;
	u32 hw_pages, cqe_size, page_size, cqe_count;

	if (entries > dev->attr.max_cqe) {
		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
			   __func__, dev->id, dev->attr.max_cqe, entries);
		return -EINVAL;
	}
	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
		return -EINVAL;

	if (dpp_cq) {
		cqe_size = OCRDMA_DPP_CQE_SIZE;
	} else {
		cq->max_hw_cqe = dev->attr.max_cqe;
		max_hw_cqe = dev->attr.max_cqe;
		cqe_size = sizeof(struct ocrdma_cqe);
		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
	}

	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	if (!cq->va) {
		status = -ENOMEM;
		goto mem_err;
	}
	memset(cq->va, 0, cq->len);
	page_size = cq->len / hw_pages;
	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->cmd.pgsz_pgcnt |= hw_pages;
	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

	if (dev->eq_cnt < 0)
		goto eq_err;
	cq->eqn = ocrdma_bind_eq(dev);
	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cqe_count = cq->len / cqe_size;
	if (cqe_count > 1024) {
		/* Set cnt to 3 to indicate more than 1024 cq entries */
		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
	} else {
		u8 count = 0;
		switch (cqe_count) {
		case 256:
			count = 0;
			break;
		case 512:
			count = 1;
			break;
		case 1024:
			count = 2;
			break;
		default:
			goto mbx_err;
		}
		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
	}
	/* shared eq between all the consumer cqs. */
	cmd->cmd.eqn = cq->eqn;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		if (dpp_cq)
			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
				OCRDMA_CREATE_CQ_TYPE_SHIFT;
		cq->phase_change = false;
		cmd->cmd.cqe_count = (cq->len / cqe_size);
	} else {
		cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
		cq->phase_change = true;
	}

	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_create_cq_rsp *)cmd;
	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
	kfree(cmd);
	return 0;
mbx_err:
	ocrdma_unbind_eq(dev, cq->eqn);
eq_err:
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
	kfree(cmd);
	return status;
}
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_cq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->bypass_flush_qid |=
		(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
		OCRDMA_DESTROY_CQ_QID_MASK;

	ocrdma_unbind_eq(dev, cq->eqn);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			  u32 pdid, int addr_check)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_lkey *cmd;
	struct ocrdma_alloc_lkey_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->pdid = pdid;
	cmd->pbl_sz_flags |= addr_check;
	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
	cmd->pbl_sz_flags |=
	    (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_lkey *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lkey = lkey;
	cmd->rsvd_frmr = fr_mr ? 1 : 0;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr *cmd;
	struct ocrdma_reg_nsmr_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->num_pbl_pdid =
	    pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);

	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
					OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
	cmd->totlen_low = hwmr->len;
	cmd->totlen_high = upper_32_bits(hwmr->len);
	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
	cmd->va_loaddr = (u32) hwmr->va;
	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
				  u32 pbl_offset, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr_cont *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lrkey = hwmr->lkey;
	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
	    (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo =
		    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
		cmd->pbl[i].hi =
		    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
int ocrdma_reg_mr(struct ocrdma_dev *dev,
		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
	int status;
	u32 last = 0;
	u32 cur_pbl_cnt, pbl_offset;
	u32 pending_pbl_cnt = hwmr->num_pbls;

	pbl_offset = 0;
	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
	if (cur_pbl_cnt == pending_pbl_cnt)
		last = 1;

	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
				   cur_pbl_cnt, hwmr->pbe_size, last);
	if (status) {
		ocrdma_err("%s() status=%d\n", __func__, status);
		return status;
	}
	/* if there is no more pbls to register then exit. */
	if (last)
		return 0;

	while (!last) {
		pbl_offset += cur_pbl_cnt;
		pending_pbl_cnt -= cur_pbl_cnt;
		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
		/* if we reach the end of the pbls, then need to set the last
		 * bit, indicating no more pbls to register for this memory key.
		 */
		if (cur_pbl_cnt == pending_pbl_cnt)
			last = 1;

		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
						pbl_offset, last);
		if (status)
			break;
	}
	if (status)
		ocrdma_err("%s() err. status=%d\n", __func__, status);

	return status;
}
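/*
 * Memory registration is chunked: the first REGISTER_NSMR command carries
 * the access flags, length, FBO, VA and up to MAX_OCRDMA_NSMR_PBL page
 * table addresses; any remaining PBL addresses are streamed with
 * REGISTER_NSMR_CONT commands at increasing pbl_offset until the chunk
 * that sets the 'last' flag.
 */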
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;

	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;

	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (!found)
		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
	found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
	if (!found)
		list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}
int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
			    enum ib_qp_state *old_ib_state)
{
	unsigned long flags;
	int status = 0;
	enum ocrdma_qp_state new_state;
	new_state = get_ocrdma_qp_state(new_ib_state);

	/* sync with wqe and rqe posting */
	spin_lock_irqsave(&qp->q_lock, flags);

	if (old_ib_state)
		*old_ib_state = get_ibqp_state(qp->state);
	if (new_state == qp->state) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return 1;
	}

	switch (qp->state) {
	case OCRDMA_QPS_RST:
		switch (new_state) {
		case OCRDMA_QPS_RST:
		case OCRDMA_QPS_INIT:
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_INIT:
		/* qps: INIT->XXX */
		switch (new_state) {
		case OCRDMA_QPS_INIT:
		case OCRDMA_QPS_RTR:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_RTR:
		switch (new_state) {
		case OCRDMA_QPS_RTS:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_RTS:
		switch (new_state) {
		case OCRDMA_QPS_SQD:
		case OCRDMA_QPS_SQE:
			break;
		case OCRDMA_QPS_ERR:
			ocrdma_flush_qp(qp);
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_SQD:
		switch (new_state) {
		case OCRDMA_QPS_RTS:
		case OCRDMA_QPS_SQE:
		case OCRDMA_QPS_ERR:
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_SQE:
		switch (new_state) {
		case OCRDMA_QPS_RTS:
		case OCRDMA_QPS_ERR:
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	case OCRDMA_QPS_ERR:
		switch (new_state) {
		case OCRDMA_QPS_RST:
			break;
		default:
			status = -EINVAL;
			break;
		}
		break;
	default:
		status = -EINVAL;
		break;
	}
	if (!status)
		qp->state = new_state;

	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
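/*
 * Summary of the switch above (driver-side mirror of the IB QP state
 * diagram): RST may move to RST/INIT; INIT to INIT/RTR or ERR; RTR to RTS
 * or ERR; RTS to SQD/SQE or ERR; SQD to RTS/SQE/ERR; SQE to RTS/ERR; and
 * ERR back to RST.  Transitions into ERR also push the QP onto the CQ
 * flush lists via ocrdma_flush_qp() so flushed CQEs get reported.
 */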
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
{
	u32 flags = 0;

	if (qp->cap_flags & OCRDMA_QP_INB_RD)
		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_INB_WR)
		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
	if (qp->cap_flags & OCRDMA_QP_LKEY0)
		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
	return flags;
}

static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa;
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_wqe_allocated;
	u32 max_sges = attrs->cap.max_send_sge;

	max_wqe_allocated = attrs->cap.max_send_wr;
	/* need to allocate one extra to for GEN1 family */
	if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
		max_wqe_allocated += 1;

	status = ocrdma_build_q_conf(&max_wqe_allocated,
		dev->attr.wqe_size, &hw_pages, &hw_page_size);
	if (status) {
		ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
			   max_wqe_allocated);
		return -EINVAL;
	}
	qp->sq.max_cnt = max_wqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->sq.va)
		return -EINVAL;
	memset(qp->sq.va, 0, len);
	qp->sq.len = len;
	qp->sq.pa = pa;
	qp->sq.entry_size = dev->attr.wqe_size;
	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);

	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
				<< OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |= (hw_pages <<
				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;

	return 0;
}

static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa;
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;

	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
			   attrs->cap.max_recv_wr + 1);
		return -EINVAL;
	}
	qp->rq.max_cnt = max_rqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->rq.va)
		return -ENOMEM;
	memset(qp->rq.va, 0, len);
	qp->rq.len = len;
	qp->rq.pa = pa;
	qp->rq.entry_size = dev->attr.rqe_size;

	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
				OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |=
	    (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
			      OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
	    OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;

	return 0;
}
static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
					 struct ocrdma_pd *pd,
					 struct ocrdma_qp *qp,
					 u8 enable_dpp_cq, u16 dpp_cq_id)
{
	qp->dpp_enabled = true;
	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
	if (!enable_dpp_cq)
		return;
	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
	cmd->dpp_credits_cqid = dpp_cq_id;
	cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
					OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
}

static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
					struct ocrdma_qp *qp)
{
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa = 0;
	int ird_page_size = dev->attr.ird_page_size;
	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;

	if (dev->attr.ird == 0)
		return 0;

	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
					  &pa, GFP_KERNEL);
	if (!qp->ird_q_va)
		return -ENOMEM;
	memset(qp->ird_q_va, 0, ird_q_len);
	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
			     pa, ird_page_size);
	return 0;
}

static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
				     struct ocrdma_qp *qp,
				     struct ib_qp_init_attr *attrs,
				     u16 *dpp_offset, u16 *dpp_credit_lmt)
{
	u32 max_wqe_allocated, max_rqe_allocated;
	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
	qp->dpp_enabled = false;
	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
		qp->dpp_enabled = true;
		*dpp_credit_lmt = (rsp->dpp_response &
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
		*dpp_offset = (rsp->dpp_response &
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
	}
	/* the response reports the ring sizes as log2 values */
	max_wqe_allocated =
		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
	max_wqe_allocated = 1 << max_wqe_allocated;
	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

	qp->sq.max_cnt = max_wqe_allocated;
	qp->sq.max_wqe_idx = max_wqe_allocated - 1;

	qp->rq.max_cnt = max_rqe_allocated;
	qp->rq.max_wqe_idx = max_rqe_allocated - 1;
}
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt)
{
	int status = -ENOMEM;
	u32 flags = 0;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_cq *cq;
	struct ocrdma_create_qp_req *cmd;
	struct ocrdma_create_qp_rsp *rsp;
	int qptype;

	switch (attrs->qp_type) {
	case IB_QPT_GSI:
		qptype = OCRDMA_QPT_GSI;
		break;
	case IB_QPT_RC:
		qptype = OCRDMA_QPT_RC;
		break;
	case IB_QPT_UD:
		qptype = OCRDMA_QPT_UD;
		break;
	default:
		return -EINVAL;
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
						OCRDMA_CREATE_QP_REQ_QPT_MASK;
	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;

	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}
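/*
 * Create-QP flow above: translate the IB QP type, build the SQ (and the RQ
 * or SRQ reference) page lists, optionally add the IRD queue and DPP
 * section, pack the PD id, ORD/IRD limits and the send/recv CQ ids, then
 * issue the mailbox command and decode the response; on failure the RQ and
 * SQ DMA rings are freed in reverse order of allocation.
 */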
2098 int ocrdma_mbx_query_qp(struct ocrdma_dev
*dev
, struct ocrdma_qp
*qp
,
2099 struct ocrdma_qp_params
*param
)
2101 int status
= -ENOMEM
;
2102 struct ocrdma_query_qp
*cmd
;
2103 struct ocrdma_query_qp_rsp
*rsp
;
2105 cmd
= ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP
, sizeof(*cmd
));
2108 cmd
->qp_id
= qp
->id
;
2109 status
= ocrdma_mbx_cmd(dev
, (struct ocrdma_mqe
*)cmd
);
2112 rsp
= (struct ocrdma_query_qp_rsp
*)cmd
;
2113 memcpy(param
, &rsp
->params
, sizeof(struct ocrdma_qp_params
));
2119 int ocrdma_resolve_dgid(struct ocrdma_dev
*dev
, union ib_gid
*dgid
,
2122 struct in6_addr in6
;
2124 memcpy(&in6
, dgid
, sizeof in6
);
2125 if (rdma_is_multicast_addr(&in6
))
2126 rdma_get_mcast_mac(&in6
, mac_addr
);
2127 else if (rdma_link_local_addr(&in6
))
2128 rdma_get_ll_mac(&in6
, mac_addr
);
2130 ocrdma_err("%s() fail to resolve mac_addr.\n", __func__
);
static void ocrdma_set_av_params(struct ocrdma_qp *qp,
				 struct ocrdma_modify_qp *cmd,
				 struct ib_qp_attr *attrs)
{
	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
	union ib_gid sgid;
	u16 vlan_id;
	u8 mac_addr[6];

	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
		return;

	cmd->params.tclass_sq_psn |=
	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.hop_lmt_rq_psn |=
	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
	       sizeof(cmd->params.dgid));
	ocrdma_query_gid(&qp->dev->ibdev, 1,
			 ah_attr->grh.sgid_index, &sgid);
	qp->sgid_idx = ah_attr->grh.sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
	ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
				(mac_addr[2] << 16) | (mac_addr[3] << 24);
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
	vlan_id = rdma_get_vlan_id(&sgid);
	if (vlan_id && (vlan_id < 0x1000)) {
		cmd->params.vlan_dmac_b4_to_b5 |=
		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
	}
}
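/*
 * Translate the ib_qp_attr fields selected by attr_mask into the
 * corresponding MODIFY_QP mailbox parameters, setting the matching
 * OCRDMA_QP_PARA_*_VALID flag for every field that is programmed.
 *
 * MAC bytes are packed low-byte-first into the two dmac words; for
 * example, MAC 00:11:22:33:44:55 yields dmac_b0_to_b3 = 0x33221100 and
 * the low 16 bits of vlan_dmac_b4_to_b5 = 0x5544.
 */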
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask,
				enum ib_qp_state old_qps)
{
	int status = 0;
	struct net_device *netdev = qp->dev->nic_info.netdev;
	int eth_mtu = iboe_get_mtu(netdev->mtu);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
					OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV)
		ocrdma_set_av_params(qp, cmd, attrs);
	else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
			(qp->dev->nic_info.mac_addr[1] << 8) |
			(qp->dev->nic_info.mac_addr[2] << 16) |
			(qp->dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
			(qp->dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
				OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		if (ib_mtu_enum_to_int(eth_mtu) <
		    ib_mtu_enum_to_int(attrs->path_mtu)) {
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
			(ib_mtu_enum_to_int(attrs->path_mtu) <<
			 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
			OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
			OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
				OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
				OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
				OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
				OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
				& OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
pmtu_err:
	return status;
}
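/*
 * Build and post a MODIFY_QP mailbox command. The new state comes from
 * attrs->qp_state when IB_QP_STATE is set in attr_mask; otherwise the
 * QP's current software state is reused.
 *
 * Illustrative (hypothetical) caller, e.g. from a QP state transition in
 * ocrdma_verbs.c:
 *
 *	status = ocrdma_mbx_modify_qp(dev, qp, &new_attrs, attr_mask,
 *				      old_state);
 */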
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask,
			 enum ib_qp_state old_qps)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
			(get_ocrdma_qp_state(attrs->qp_state) <<
			 OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else {
		cmd->params.max_sge_recv_flags |=
			(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
	}
	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
mbx_err:
	kfree(cmd);
	return status;
}
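/*
 * Destroy a QP via the DELETE_QP mailbox command, then release the SQ
 * and RQ DMA buffers (the RQ only when the QP does not use an SRQ) and
 * return the DPP slot to the PD if one was consumed.
 */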
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);

	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
	return status;
}
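/*
 * Create a shared receive queue: size the RQ from srq_attr->attr.max_wr,
 * allocate its DMA pages, describe them in the CREATE_SRQ command and
 * record the queue id and limits reported back by the firmware.
 */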
int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
			  struct ib_srq_init_attr *srq_attr,
			  struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	int hw_pages, hw_page_size;
	int len;
	struct ocrdma_create_srq_rsp *rsp;
	struct ocrdma_create_srq *cmd;
	dma_addr_t pa;
	struct ocrdma_dev *dev = srq->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
	max_rqe_allocated = srq_attr->attr.max_wr + 1;
	status = ocrdma_build_q_conf(&max_rqe_allocated,
				     dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
			   srq_attr->attr.max_wr);
		goto ret;
	}
	len = hw_pages * hw_page_size;
	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!srq->rq.va) {
		status = -ENOMEM;
		goto ret;
	}
	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);

	srq->rq.entry_size = dev->attr.rqe_size;
	srq->rq.pa = pa;
	srq->rq.len = len;
	srq->rq.max_cnt = max_rqe_allocated;

	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
				OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
				<< OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
	cmd->pages_rqe_sz |= (dev->attr.rqe_size
				<< OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
				& OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_srq_rsp *)cmd;
	srq->id = rsp->id;
	srq->rq.dbid = rsp->id;
	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
			OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
			OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
	max_rqe_allocated = (1 << max_rqe_allocated);
	srq->rq.max_cnt = max_rqe_allocated;
	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
			OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
			OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
	goto ret;
mbx_err:
	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
ret:
	kfree(cmd);
	return status;
}
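/* Program a new SRQ limit (low watermark) through a mailbox command. */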
int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_modify_srq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->id = srq->id;
	cmd->limit_max_rqe |= srq_attr->srq_limit <<
				OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
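/*
 * Query the SRQ and report its current max_wr, max_sge and srq_limit
 * back through the ib_srq_attr structure.
 */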
int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_query_srq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->id = srq->rq.dbid;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	if (!status) {
		struct ocrdma_query_srq_rsp *rsp =
			(struct ocrdma_query_srq_rsp *)cmd;

		srq_attr->max_sge =
			rsp->srq_lmt_max_sge &
			OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
		srq_attr->max_wr =
			rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
			OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
	}
	kfree(cmd);
	return status;
}
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->id = srq->id;
	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
	return status;
}
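/*
 * Allocate an address-vector entry: scan the device AV table under its
 * lock for the first slot not marked OCRDMA_AV_VALID and claim it for
 * this AH.
 */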
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	int i;
	int status = -EINVAL;
	struct ocrdma_av *av;
	unsigned long flags;

	av = dev->av_tbl.va;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	for (i = 0; i < dev->av_tbl.num_ah; i++) {
		if (av->valid == 0) {
			av->valid = OCRDMA_AV_VALID;
			ah->av = av;
			ah->id = i;
			status = 0;
			break;
		}
		av++;
	}
	if (i == dev->av_tbl.num_ah)
		status = -EAGAIN;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return status;
}
int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return 0;
}
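/*
 * Set up the control-path (mailbox) event queue and hook its interrupt;
 * in INTx mode the IRQ line is shared with the NIC driver.
 */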
static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
{
	int status;
	int irq;
	int num_eq = 0;
	unsigned long flags = 0;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		flags = IRQF_SHARED;
	else {
		num_eq = dev->nic_info.msix.num_vectors -
				dev->nic_info.msix.start_vector;
		/* minimum two vectors/eq are required for rdma to work.
		 * one for control path and one for data path.
		 */
		if (num_eq < 2)
			return -EBUSY;
	}

	status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
	if (status)
		return status;
	sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
	irq = ocrdma_get_irq(dev, &dev->meq);
	status = request_irq(irq, ocrdma_irq_handler, flags,
			     dev->meq.irq_name, &dev->meq);
	if (status)
		_ocrdma_destroy_eq(dev, &dev->meq);
	return status;
}
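/*
 * Create the data-path event queues, one per available MSI-X vector
 * (capped at the number of online CPUs), and request an IRQ for each.
 * Initialization succeeds as long as at least one EQ comes up.
 */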
static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
			dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}
	dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
	if (!dev->qp_eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status)
			break;
		sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->qp_eq_tbl[i].irq_name,
				     &dev->qp_eq_tbl[i]);
		if (status) {
			_ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
			break;
		}
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	if (dev->eq_cnt >= 1)
		return 0;
	ocrdma_destroy_qp_eqs(dev);
	return status;
}
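/*
 * Bring-up sequence: control-path EQ, data-path EQs, mailbox queue, then
 * the firmware queries and the AH table. Each step is unwound in reverse
 * order if a later step fails.
 */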
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;

	/* set up control path eq */
	status = ocrdma_create_mq_eq(dev);
	if (status)
		return status;
	/* set up data path eq */
	status = ocrdma_create_qp_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	return 0;

conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_qp_eqs(dev);
qpeq_err:
	ocrdma_destroy_eq(dev, &dev->meq);
	ocrdma_err("%s() status=%d\n", __func__, status);
	return status;
}
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the data path eqs */
	ocrdma_destroy_qp_eqs(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);
	ocrdma_destroy_eq(dev, &dev->meq);
}