/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"

enum mbx_status {
	OCRDMA_MBX_STATUS_FAILED = 1,
	OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
	OCRDMA_MBX_STATUS_OOR = 100,
	OCRDMA_MBX_STATUS_INVALID_PD = 101,
	OCRDMA_MBX_STATUS_PD_INUSE = 102,
	OCRDMA_MBX_STATUS_INVALID_CQ = 103,
	OCRDMA_MBX_STATUS_INVALID_QP = 104,
	OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
	OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
	OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
	OCRDMA_MBX_STATUS_MW_BOUND = 114,
	OCRDMA_MBX_STATUS_INVALID_VA = 115,
	OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
	OCRDMA_MBX_STATUS_INVALID_FBO = 117,
	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
	OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
	OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
	OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
	OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
	OCRDMA_MBX_STATUS_QP_BOUND = 130,
	OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
	OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
};

enum additional_status {
	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
	OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
};

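/* Queue accessors and index helpers. All ring lengths used here
 * (OCRDMA_EQ_LEN, OCRDMA_MQ_CQ_LEN, OCRDMA_MQ_LEN) are powers of two,
 * so the head/tail indices below wrap with a simple (len - 1) mask.
 */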
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
	return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}

static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
	    (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
		return NULL;
	return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
	switch (qps) {
	case OCRDMA_QPS_RST:
		return IB_QPS_RESET;
	case OCRDMA_QPS_INIT:
		return IB_QPS_INIT;
	case OCRDMA_QPS_RTR:
		return IB_QPS_RTR;
	case OCRDMA_QPS_RTS:
		return IB_QPS_RTS;
	case OCRDMA_QPS_SQD:
	case OCRDMA_QPS_SQ_DRAINING:
		return IB_QPS_SQD;
	case OCRDMA_QPS_SQE:
		return IB_QPS_SQE;
	case OCRDMA_QPS_ERR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
	switch (qps) {
	case IB_QPS_RESET:
		return OCRDMA_QPS_RST;
	case IB_QPS_INIT:
		return OCRDMA_QPS_INIT;
	case IB_QPS_RTR:
		return OCRDMA_QPS_RTR;
	case IB_QPS_RTS:
		return OCRDMA_QPS_RTS;
	case IB_QPS_SQD:
		return OCRDMA_QPS_SQD;
	case IB_QPS_SQE:
		return OCRDMA_QPS_SQE;
	case IB_QPS_ERR:
		return OCRDMA_QPS_ERR;
	}
	return OCRDMA_QPS_ERR;
}

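/* Map a firmware mailbox status (plus its additional status) to a negative
 * errno: out-of-resource codes become -EAGAIN or -ENOBUFS, invalid
 * parameters -EINVAL, objects still in use -EBUSY, anything else -EFAULT.
 */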
static int ocrdma_get_mbx_errno(u32 status)
{
	int err_num;
	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
					OCRDMA_MBX_RSP_STATUS_SHIFT;
	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
					OCRDMA_MBX_RSP_ASTATUS_SHIFT;

	switch (mbox_status) {
	case OCRDMA_MBX_STATUS_OOR:
	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
		err_num = -EAGAIN;
		break;

	case OCRDMA_MBX_STATUS_INVALID_PD:
	case OCRDMA_MBX_STATUS_INVALID_CQ:
	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
	case OCRDMA_MBX_STATUS_INVALID_QP:
	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
	case OCRDMA_MBX_STATUS_INVALID_LKEY:
	case OCRDMA_MBX_STATUS_INVALID_VA:
	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
	case OCRDMA_MBX_STATUS_INVALID_FBO:
	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
	case OCRDMA_MBX_STATUS_SRQ_ERROR:
	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
		err_num = -EINVAL;
		break;

	case OCRDMA_MBX_STATUS_PD_INUSE:
	case OCRDMA_MBX_STATUS_QP_BOUND:
	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
	case OCRDMA_MBX_STATUS_MW_BOUND:
		err_num = -EBUSY;
		break;

	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
		err_num = -ENOBUFS;
		break;

	case OCRDMA_MBX_STATUS_FAILED:
		switch (add_status) {
		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
			err_num = -EAGAIN;
			break;
		default:
			err_num = -EFAULT;
		}
		break;
	default:
		err_num = -EFAULT;
	}
	return err_num;
}

char *port_speed_string(struct ocrdma_dev *dev)
{
	char *str = "";
	u16 speeds_supported;

	speeds_supported = dev->phy.fixed_speeds_supported |
				dev->phy.auto_speeds_supported;
	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
		str = "40Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
		str = "10Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
		str = "1Gbps ";

	return str;
}

static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
	int err_num = -EINVAL;

	switch (cqe_status) {
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
		err_num = -EPERM;
		break;
	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
	default:
		err_num = -EINVAL;
		break;
	}
	return err_num;
}

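/* Ring the CQ doorbell. The register value packs the CQ id, its extended
 * id bits, the rearm and solicit flags, and the number of CQEs consumed
 * (cqe_popped), per the shift/mask constants used below.
 */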
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped)
{
	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (armed)
		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
	if (solicited)
		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}

static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
	u32 val = 0;

	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
			      bool arm, bool clear_int, u16 num_eqe)
{
	u32 val = 0;

	val |= eq_id & OCRDMA_EQ_ID_MASK;
	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
	if (arm)
		val |= (1 << OCRDMA_REARM_SHIFT);
	if (clear_int)
		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}

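/* Initialize the common mailbox command header: opcode, subsystem, a
 * 20-second firmware timeout and the payload length (which excludes
 * this header itself).
 */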
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
			    u8 opcode, u8 subsys, u32 cmd_len)
{
	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
	cmd_hdr->timeout = 20; /* seconds */
	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}

static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
	struct ocrdma_mqe *mqe;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return NULL;
	mqe->hdr.spcl_sge_cnt_emb |=
		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
		OCRDMA_MQE_HDR_EMB_MASK;
	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
			mqe->hdr.pyld_len);
	return mqe;
}

static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	q->size = len * entry_size;
	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
				   GFP_KERNEL);
	if (!q->va)
		return -ENOMEM;
	return 0;
}

static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
				 dma_addr_t host_pa, int hw_page_size)
{
	int i;

	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
		q_pa[i].hi = (u32) upper_32_bits(host_pa);
		host_pa += hw_page_size;
	}
}

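/* Issue a DELETE_MQ/DELETE_CQ/DELETE_EQ command for the given queue.
 * Note this goes through be_roce_mcc_cmd(), the bootstrap mailbox path
 * provided by the companion NIC driver, rather than this driver's own MQ.
 */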
static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
			       struct ocrdma_queue_info *q, int queue_type)
{
	u8 opcode = 0;
	int status;
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
	case QTYPE_MCCQ:
		opcode = OCRDMA_CMD_DELETE_MQ;
		break;
	case QTYPE_CQ:
		opcode = OCRDMA_CMD_DELETE_CQ;
		break;
	case QTYPE_EQ:
		opcode = OCRDMA_CMD_DELETE_EQ;
		break;
	default:
		BUG();
	}
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->id = q->id;

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status)
		q->created = false;
	return status;
}

static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int status;
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	cmd->req.rsvd_version = 2;
	cmd->num_pages = 4;
	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
			     PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
				 NULL);
	if (!status) {
		eq->q.id = rsp->vector_eqid & 0xffff;
		eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
		eq->q.created = true;
	}
	return status;
}

static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
{
	int status;

	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	if (status)
		return status;

	status = ocrdma_mbx_create_eq(dev, eq);
	if (status)
		goto mbx_err;
	eq->dev = dev;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

	return 0;
mbx_err:
	ocrdma_free_q(dev, &eq->q);
	return status;
}

int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
	else
		irq = dev->nic_info.msix.vector_list[eq->vector];
	return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	if (eq->q.created) {
		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
		ocrdma_free_q(dev, &eq->q);
	}
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	/* disarm the EQ so that interrupts are not generated during
	 * freeing and while the EQ delete is in progress.
	 */
	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

	irq = ocrdma_get_irq(dev, eq);
	free_irq(irq, eq);
	_ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
	int i;

	for (i = 0; i < dev->eq_cnt; i++)
		ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}

static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
				   struct ocrdma_queue_info *cq,
				   struct ocrdma_queue_info *eq)
{
	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
	int status;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);

	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
	cmd->eqn = eq->id;
	cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);

	ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
			     cq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
		cq->created = true;
	}
	return status;
}

static u32 ocrdma_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

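/* Create the mailbox queue (MQ) on its CQ. The ring size field carries
 * the encoded length from ocrdma_encoded_q_len() above; for example a
 * 128-entry ring encodes as fls(128) = 8, i.e. log2(128) + 1.
 */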
static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
				struct ocrdma_queue_info *mq,
				struct ocrdma_queue_info *cq)
{
	int num_pages, status;
	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
	struct ocrdma_pa *pa;

	memset(cmd, 0, sizeof(*cmd));
	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->req.rsvd_version = 1;
	cmd->cqid_pages = num_pages;
	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;

	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
	/* Request link events on this MQ. */
	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);

	cmd->async_cqid_ringsize = cq->id;
	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
				     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
	cmd->valid = OCRDMA_CREATE_MQ_VALID;
	pa = &cmd->pa[0];

	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		mq->id = rsp->id;
		mq->created = true;
	}
	return status;
}

static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
	int status;

	/* Alloc completion queue for Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
				sizeof(struct ocrdma_mcqe));
	if (status)
		goto alloc_err;

	dev->eq_tbl[0].cq_cnt++;
	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
	if (status)
		goto mbx_cq_free;

	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
	mutex_init(&dev->mqe_ctx.lock);

	/* Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	if (status)
		goto mbx_cq_destroy;
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	if (status)
		goto mbx_q_free;
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
	return 0;

mbx_q_free:
	ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}

static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	if (mbxq->created) {
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	}
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	if (cq->created) {
		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);
	}
}

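/* Async-event (AE) handling: the helpers below translate firmware async
 * CQEs into IB events and dispatch them to the QP/CQ/SRQ or device level
 * event handlers registered by the consumer.
 */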
static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	if (qp == NULL)
		BUG();
	ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}

static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt;
	int cq_event = 0;
	int qp_event = 1;
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
	u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
	u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;

	/*
	 * Some FW versions return wrong qp or cq ids in CQEs, so check
	 * that the IDs are valid before using them.
	 */
	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
		if (qpid < dev->attr.max_qp)
			qp = dev->qp_tbl[qpid];
		if (qp == NULL) {
			pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
			       dev->id, qpid);
			return;
		}
	}

	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
		if (cqid < dev->attr.max_cq)
			cq = dev->cq_tbl[cqid];
		if (cq == NULL) {
			pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
			       dev->id, cqid);
			return;
		}
	}

	memset(&ib_evt, 0, sizeof(ib_evt));

	ib_evt.device = &dev->ibdev;

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		qp_event = 0;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		cq_event = 0;
		qp_event = 0;
		srq_event = 0;
		dev_event = 0;
		pr_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	if (type < OCRDMA_MAX_ASYNC_ERRORS)
		atomic_inc(&dev->async_err_stats[type]);

	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
						     qp->srq->ibsrq.srq_context);
	} else if (dev_event) {
		dev_err(&dev->ibdev.dev, "Fatal event received\n");
		ib_dispatch_event(&ib_evt);
	}
}

static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
				     struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_ae_pvid_mcqe *evt;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	switch (type) {
	case OCRDMA_ASYNC_EVENT_PVID_STATE:
		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
			OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
			dev->pvid = ((evt->tag_enabled &
					OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
					OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
		break;

	case OCRDMA_ASYNC_EVENT_COS_VALUE:
		atomic_set(&dev->update_sl, 1);
		break;
	default:
		/* Not interested in these events. */
		break;
	}
}

static void ocrdma_process_link_state(struct ocrdma_dev *dev,
				      struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_ae_lnkst_mcqe *evt;
	u8 lstate;

	evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
	lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);

	if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
		return;

	if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
		ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
}

static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	switch (evt_code) {
	case OCRDMA_ASYNC_LINK_EVE_CODE:
		ocrdma_process_link_state(dev, cqe);
		break;
	case OCRDMA_ASYNC_RDMA_EVE_CODE:
		ocrdma_dispatch_ibevent(dev, cqe);
		break;
	case OCRDMA_ASYNC_GRP5_EVE_CODE:
		ocrdma_process_grp5_aync(dev, cqe);
		break;
	default:
		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
		       dev->id, evt_code);
	}
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
		     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
		    >> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
	} else
		pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}

static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	u16 cqe_popped = 0;
	struct ocrdma_mcqe *cqe;

	while (1) {
		cqe = ocrdma_get_mcqe(dev);
		if (cqe == NULL)
			break;
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
		cqe_popped += 1;
		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
						     struct ocrdma_cq *cq,
						     bool sq)
{
	struct ocrdma_qp *qp;
	struct list_head *cur;
	struct ocrdma_cq *bcq = NULL;
	struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);

	list_for_each(cur, head) {
		if (sq)
			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
		else
			qp = list_entry(cur, struct ocrdma_qp, rq_entry);

		if (qp->srq)
			continue;
		/* if the wq and rq share the same cq, then comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if the completion came on the sq, the rq's cq is the buddy cq.
		 * if the completion came on the rq, the sq's cq is the buddy cq.
		 */
		if (qp->sq_cq == cq)
			bcq = qp->rq_cq;
		else
			bcq = qp->sq_cq;
		return bcq;
	}
	return NULL;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_cq *bcq = NULL;

	/* Go through the list of QPs in error state which are using this CQ
	 * and invoke its callback handler to trigger CQE processing for
	 * error/flushed CQEs. It is rare to find more than a few entries in
	 * this list, as most consumers stop after getting an error CQE.
	 * The list is traversed only once, until a matching buddy cq is
	 * found for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	/* Check if a buddy CQ is present.
	 * true - check for the SQ CQ
	 * false - check for the RQ CQ
	 */
	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
	if (bcq == NULL)
		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);

	/* if there is a valid buddy cq, invoke its completion handler */
	if (bcq && bcq->ibcq.comp_handler) {
		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
	}
}

static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)
		BUG();

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL)
		return;

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
	else
		ocrdma_qp_cq_handler(dev, cq_id);
}

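/* Interrupt handler for an EQ. Consumes valid EQEs, acks each one by
 * ringing the EQ doorbell, and hands CQ events to ocrdma_cq_handler().
 * The budget (the number of CQs bound to this EQ) bounds the loop so a
 * stale EQE left after the last bound CQ was destroyed cannot spin forever.
 */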
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	u16 cq_id;
	u8 mcode;
	int budget = eq->cq_cnt;

	do {
		ptr = ocrdma_get_eqe(eq);
		eqe = *ptr;
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
				>> OCRDMA_EQE_MAJOR_CODE_SHIFT;
		if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
			pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
			       eq->q.id, eqe.id_valid);
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
			break;

		ptr->id_valid = 0;
		/* ring the eq doorbell as soon as the eqe is consumed. */
		ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
		/* check whether it is a CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		}
		ocrdma_eq_inc_tail(eq);

		/* There can be a stale EQE after the last bound CQ is
		 * destroyed. EQE valid and budget == 0 implies this.
		 */
		if (budget)
			budget--;

	} while (budget);

	eq->aic_obj.eq_intr_cnt++;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}

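/* Mailbox command flow: ocrdma_post_mqe() copies the MQE into the ring
 * (tagged with the current head index) and rings the MQ doorbell;
 * ocrdma_wait_mqe_cmpl() then sleeps until ocrdma_process_mcqe() sees the
 * matching completion tag or the 30 second timeout fires.
 */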
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure the descriptor is written before ringing the doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	long status;
	/* 30 sec timeout */
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;
	else {
		dev->mqe_ctx.fw_error_state = true;
		pr_err("%s(%d) mailbox timeout: fw not responding\n",
		       __func__, dev->id);
		return -1;
	}
}

/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp_mqe;
	struct ocrdma_mbx_rsp *rsp = NULL;

	mutex_lock(&dev->mqe_ctx.lock);
	if (dev->mqe_ctx.fw_error_state)
		goto mbx_err;
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp_mqe = ocrdma_get_mqe_rsp(dev);
	ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		rsp = &mqe->u.rsp;

	if (cqe_status || ext_status) {
		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
		       __func__, cqe_status, ext_status);
		if (rsp) {
			/* This is for embedded cmds. */
			pr_err("opcode=0x%x, subsystem=0x%x\n",
			       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
				OCRDMA_MBX_RSP_OPCODE_SHIFT,
				(rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
				OCRDMA_MBX_RSP_SUBSYS_SHIFT);
		}
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	/* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
	if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
				 void *payload_va)
{
	int status;
	struct ocrdma_mbx_rsp *rsp = payload_va;

	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		BUG();

	status = ocrdma_mbx_cmd(dev, mqe);
	if (!status)
		/* For non embedded, only CQE failures are handled in
		 * ocrdma_mbx_cmd. We need to check for RSP errors.
		 */
		if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
			status = ocrdma_get_mbx_errno(rsp->status);

	if (status)
		pr_err("opcode=0x%x, subsystem=0x%x\n",
		       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
			OCRDMA_MBX_RSP_OPCODE_SHIFT,
			(rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
			OCRDMA_MBX_RSP_SUBSYS_SHIFT);
	return status;
}

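/* Unpack the QUERY_CONFIG response into the device attribute structure.
 * Each attribute is extracted from its packed response word with the
 * corresponding mask/shift pair.
 */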
static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	attr->max_pd =
	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->udp_encap = (rsp->max_pd_ca_ack_delay &
			   OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
			   OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
	attr->max_dpp_pds =
	   (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
	attr->max_qp =
	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_srq =
		(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
	attr->max_send_sge = ((rsp->max_recv_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_recv_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
			     OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
	attr->max_rdma_sge = (rsp->max_wr_rd_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mw = rsp->max_mw;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
			      rsp->max_mr_size_lo;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->max_cq = (rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
		OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
		OCRDMA_WQE_STRIDE;
	attr->max_inline_data =
	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
			      sizeof(struct ocrdma_sge));
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		attr->ird = 1;
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	}
	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
		OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}

static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	return 0;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
{
	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
	struct ocrdma_rdma_stats_resp *old_stats;
	int status;

	old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
	if (old_stats == NULL)
		return -ENOMEM;

	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = dev->stats_mem.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;

	/* Cache the old stats */
	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
	memset(req, 0, dev->stats_mem.size);

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
			OCRDMA_CMD_GET_RDMA_STATS,
			OCRDMA_SUBSYS_ROCE,
			dev->stats_mem.size);
	if (reset)
		req->reset_stats = reset;

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
	if (status)
		/* Copy from cache, if mbox fails */
		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
	else
		ocrdma_le32_to_cpu(req, dev->stats_mem.size);

	kfree(old_stats);
	return status;
}

static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_dma_mem dma;
	struct ocrdma_mqe *mqe;
	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
	struct mgmt_hba_attribs *hba_attribs;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return status;

	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
				    dma.size, &dma.pa, GFP_KERNEL);
	if (!dma.va)
		goto free_mqe;

	mqe->hdr.pyld_len = dma.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
			OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
	mqe->u.nonemb_req.sge[0].len = dma.size;

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
			OCRDMA_SUBSYS_COMMON,
			dma.size);

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
	if (!status) {
		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;

		dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
					OCRDMA_HBA_ATTRB_PTNUM_MASK)
					>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
		strlcpy(dev->model_number,
			hba_attribs->controller_model_number,
			sizeof(dev->model_number));
	}
	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
	kfree(mqe);
	return status;
}

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
			      u8 *lnk_state)
{
	int status = -ENOMEM;
	struct ocrdma_get_link_speed_rsp *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
				  sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
	if (lnk_speed)
		*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
			      >> OCRDMA_PHY_PS_SHIFT;
	if (lnk_state)
		*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);

mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_get_phy_info_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
	if (!cmd)
		return status;

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
	dev->phy.phy_type =
			(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
	dev->phy.interface_type =
			(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
				>> OCRDMA_IF_TYPE_SHIFT;
	dev->phy.auto_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
	dev->phy.fixed_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
				>> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		pd->dpp_enabled = false;
		pd->num_dpp_qp = 0;
	}
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

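/* PD range preallocation: reserve a range of DPP and normal PD ids from
 * firmware up front and manage them with local bitmaps, which avoids a
 * mailbox round trip for each individual PD allocation.
 */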
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	size_t pd_bitmap_size;
	struct ocrdma_alloc_pd_range *cmd;
	struct ocrdma_alloc_pd_range_rsp *rsp;

	/* Pre allocate the DPP PDs */
	if (dev->attr.max_dpp_pds) {
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;
		cmd->pd_count = dev->attr.max_dpp_pds;
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
		status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
		rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;

		if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
		    rsp->pd_count) {
			dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
					OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
			dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
			dev->pd_mgr->max_dpp_pd = rsp->pd_count;
			pd_bitmap_size =
				BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
			dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
							     GFP_KERNEL);
		}
		kfree(cmd);
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
	if (!status && rsp->pd_count) {
		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
		dev->pd_mgr->max_normal_pd = rsp->pd_count;
		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
						      GFP_KERNEL);
	}
	kfree(cmd);

	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
		/* Enable PD resource manager */
		dev->pd_mgr->pd_prealloc_valid = true;
		return 0;
	}
	return status;
}

static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
{
	struct ocrdma_dealloc_pd_range *cmd;

	/* return normal PDs to firmware */
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		goto mbx_err;

	if (dev->pd_mgr->max_normal_pd) {
		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
		cmd->pd_count = dev->pd_mgr->max_normal_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}

	if (dev->pd_mgr->max_dpp_pd) {
		kfree(cmd);
		/* return DPP PDs to firmware */
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			goto mbx_err;

		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}
mbx_err:
	kfree(cmd);
}

void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
{
	int status;

	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
			      GFP_KERNEL);
	if (!dev->pd_mgr)
		return;

	status = ocrdma_mbx_alloc_pd_range(dev);
	if (status) {
		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
		       __func__, dev->id);
	}
}

static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
	ocrdma_mbx_dealloc_pd_range(dev);
	kfree(dev->pd_mgr->pd_norm_bitmap);
	kfree(dev->pd_mgr->pd_dpp_bitmap);
	kfree(dev->pd_mgr);
}

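/* Compute a queue layout: round the entry count up to a power of two,
 * pick the smallest supported page size that can hold the queue, and
 * derive the resulting page count and effective entry count.
 */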
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
			       int *num_pages, int *page_size)
{
	int i;
	int mem_size;

	*num_entries = roundup_pow_of_two(*num_entries);
	mem_size = *num_entries * entry_size;
	/* find the lowest possible page-size multiplier */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
			break;
	}
	if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
		return -EINVAL;
	mem_size = roundup(mem_size,
		((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
	*num_pages =
	    mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*num_entries = mem_size / entry_size;
	return 0;
}

static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
{
	int i;
	int status = -ENOMEM;
	int max_ah;
	struct ocrdma_create_ah_tbl *cmd;
	struct ocrdma_create_ah_tbl_rsp *rsp;
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa;
	struct ocrdma_pbe *pbes;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
	if (!cmd)
		return status;

	max_ah = OCRDMA_MAX_AH;
	dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;

	/* number of PBEs in PBL */
	cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
			OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
			OCRDMA_CREATE_AH_NUM_PAGES_MASK;

	/* page size */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
			break;
	}
	cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
			OCRDMA_CREATE_AH_PAGE_SIZE_MASK;

	/* ah_entry size */
	cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
			 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
			 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;

	dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						&dev->av_tbl.pbl.pa,
						GFP_KERNEL);
	if (dev->av_tbl.pbl.va == NULL)
		goto mem_err;

	dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
					    &pa, GFP_KERNEL);
	if (dev->av_tbl.va == NULL)
		goto mem_err_ah;
	dev->av_tbl.pa = pa;
	dev->av_tbl.num_ah = max_ah;

	pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
	for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
		pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
		pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
		pa += PAGE_SIZE;
	}
	cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
	cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
	dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
	kfree(cmd);
	return 0;

mbx_err:
	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
			  dev->av_tbl.pa);
	dev->av_tbl.va = NULL;
mem_err_ah:
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
	dev->av_tbl.pbl.va = NULL;
	dev->av_tbl.size = 0;
mem_err:
	kfree(cmd);
	return status;
}

static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
{
	struct ocrdma_delete_ah_tbl *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	if (dev->av_tbl.va == NULL)
		return;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
	if (!cmd)
		return;
	cmd->ahid = dev->av_tbl.ahid;

	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
			  dev->av_tbl.pa);
	dev->av_tbl.va = NULL;
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
	kfree(cmd);
}

/* Multiple CQs share an EQ. This routine returns the least-used EQ to
 * associate with a CQ, which distributes the interrupt processing and
 * CPU load across the EQs, their vectors and thus the corresponding CPUs.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
{
	int i, selected_eq = 0, cq_cnt = 0;
	u16 eq_id;

	mutex_lock(&dev->dev_lock);
	cq_cnt = dev->eq_tbl[0].cq_cnt;
	eq_id = dev->eq_tbl[0].q.id;
	/* find the EQ which has the least number of
	 * CQs associated with it.
	 */
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
			cq_cnt = dev->eq_tbl[i].cq_cnt;
			eq_id = dev->eq_tbl[i].q.id;
			selected_eq = i;
		}
	}
	dev->eq_tbl[selected_eq].cq_cnt += 1;
	mutex_unlock(&dev->dev_lock);
	return eq_id;
}

static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
{
	int i;

	mutex_lock(&dev->dev_lock);
	i = ocrdma_get_eq_table_index(dev, eq_id);
	if (i == -EINVAL)
		BUG();
	dev->eq_tbl[i].cq_cnt -= 1;
	mutex_unlock(&dev->dev_lock);
}

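/* Create a completion queue. The CQE count programmed into the command
 * must be one of the encodings handled below (256/512/1024, or the
 * "more than 1024" marker), and the CQ is bound to the least-loaded EQ.
 */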
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
			 int entries, int dpp_cq, u16 pd_id)
{
	int status = -ENOMEM;
	int max_hw_cqe;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_create_cq *cmd;
	struct ocrdma_create_cq_rsp *rsp;
	u32 hw_pages, cqe_size, page_size, cqe_count;

	if (entries > dev->attr.max_cqe) {
		pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
		       __func__, dev->id, dev->attr.max_cqe, entries);
		return -EINVAL;
	}
	if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
		return -EINVAL;

	if (dpp_cq) {
		cq->max_hw_cqe = 1;
		max_hw_cqe = 1;
		cqe_size = OCRDMA_DPP_CQE_SIZE;
		hw_pages = 1;
	} else {
		cq->max_hw_cqe = dev->attr.max_cqe;
		max_hw_cqe = dev->attr.max_cqe;
		cqe_size = sizeof(struct ocrdma_cqe);
		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
	}

	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	if (!cq->va) {
		status = -ENOMEM;
		goto mem_err;
	}
	page_size = cq->len / hw_pages;
	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->cmd.pgsz_pgcnt |= hw_pages;
	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

	cq->eqn = ocrdma_bind_eq(dev);
	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
	cqe_count = cq->len / cqe_size;
	cq->cqe_cnt = cqe_count;
	if (cqe_count > 1024) {
		/* Set cnt to 3 to indicate more than 1024 cq entries */
		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
	} else {
		u8 count = 0;
		switch (cqe_count) {
		case 256:
			count = 0;
			break;
		case 512:
			count = 1;
			break;
		case 1024:
			count = 2;
			break;
		default:
			goto mbx_err;
		}
		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
	}
	/* shared eq between all the consumer cqs. */
	cmd->cmd.eqn = cq->eqn;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		if (dpp_cq)
			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
				OCRDMA_CREATE_CQ_TYPE_SHIFT;
		cq->phase_change = false;
		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
	} else {
		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
		cq->phase_change = true;
	}

	/* pd_id valid only for v3 */
	cmd->cmd.pdid_cqecnt |= (pd_id <<
		OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_create_cq_rsp *)cmd;
	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
	kfree(cmd);
	return 0;
mbx_err:
	ocrdma_unbind_eq(dev, cq->eqn);
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
	kfree(cmd);
	return status;
}

void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
{
	struct ocrdma_destroy_cq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
	if (!cmd)
		return;
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->bypass_flush_qid |=
		(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
		OCRDMA_DESTROY_CQ_QID_MASK;

	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	ocrdma_unbind_eq(dev, cq->eqn);
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
	kfree(cmd);
}

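/* Allocate an lkey: the MR's access rights and PBL count are packed
 * into pbl_sz_flags before the ALLOC_LKEY command is sent.
 */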
int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			  u32 pdid, int addr_check)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_lkey *cmd;
	struct ocrdma_alloc_lkey_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->pdid = pdid;
	cmd->pbl_sz_flags |= addr_check;
	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
	int status;
	struct ocrdma_dealloc_lkey *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lkey = lkey;
	cmd->rsvd_frmr = fr_mr ? 1 : 0;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	kfree(cmd);
	return status;
}

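/* First REGISTER_NSMR command for a memory region: encodes access
 * rights, FBO, total length, VA and the first batch of PBL addresses.
 */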
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
{
	int status = -ENOMEM;
	int i;
	struct ocrdma_reg_nsmr *cmd;
	struct ocrdma_reg_nsmr_rsp *rsp;
	u64 fbo = hwmr->va & (hwmr->pbe_size - 1);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->num_pbl_pdid =
		pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
	cmd->fr_mr = hwmr->fr_mr;

	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
				   OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
	cmd->totlen_low = hwmr->len;
	cmd->totlen_high = upper_32_bits(hwmr->len);
	cmd->fbo_low = lower_32_bits(fbo);
	cmd->fbo_high = upper_32_bits(fbo);
	cmd->va_loaddr = (u32) hwmr->va;
	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;
mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
				  u32 pbl_offset, u32 last)
{
	int status;
	int i;
	struct ocrdma_reg_nsmr_cont *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	cmd->lrkey = hwmr->lkey;
	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
			      (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo =
			(u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
		cmd->pbl[i].hi =
			upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	kfree(cmd);
	return status;
}

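/* Register an MR whose PBL list may not fit in a single mailbox
 * command: REGISTER_NSMR carries the first batch and REGISTER_NSMR_CONT
 * the rest, with 'last' set on the final batch.
 */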
int ocrdma_reg_mr(struct ocrdma_dev *dev,
		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
{
	int status;
	u32 last = 0;
	u32 cur_pbl_cnt, pbl_offset;
	u32 pending_pbl_cnt = hwmr->num_pbls;

	pbl_offset = 0;
	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
	if (cur_pbl_cnt == pending_pbl_cnt)
		last = 1;

	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
				   cur_pbl_cnt, hwmr->pbe_size, last);
	if (status) {
		pr_err("%s() status=%d\n", __func__, status);
		return status;
	}
	/* if there are no more PBLs to register, then exit. */
	if (last)
		return 0;

	while (!last) {
		pbl_offset += cur_pbl_cnt;
		pending_pbl_cnt -= cur_pbl_cnt;
		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
		/* if we reach the end of the PBLs, then set the last bit,
		 * indicating no more PBLs to register for this memory key.
		 */
		if (cur_pbl_cnt == pending_pbl_cnt)
			last = 1;

		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
						pbl_offset, last);
		if (status)
			break;
	}
	if (status)
		pr_err("%s() err. status=%d\n", __func__, status);

	return status;
}

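/* Flush-list membership tests; called under dev->flush_q_lock
 * (see ocrdma_flush_qp() below).
 */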
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;

	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
{
	struct ocrdma_qp *tmp;
	bool found = false;

	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
		if (qp == tmp) {
			found = true;
			break;
		}
	}
	return found;
}

void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
	bool found;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (!found)
		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (!found)
			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
{
	qp->sq.head = 0;
	qp->sq.tail = 0;
	qp->rq.head = 0;
	qp->rq.tail = 0;
}

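/* Move the software QP state machine. Returns 1 (and does nothing) if
 * the QP is already in the requested state; entering INIT resets the
 * queue pointers, entering ERR queues the QP for flushing.
 */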
int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
			   enum ib_qp_state *old_ib_state)
{
	unsigned long flags;
	enum ocrdma_qp_state new_state;

	new_state = get_ocrdma_qp_state(new_ib_state);

	/* sync with wqe and rqe posting */
	spin_lock_irqsave(&qp->q_lock, flags);

	if (old_ib_state)
		*old_ib_state = get_ibqp_state(qp->state);
	if (new_state == qp->state) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		return 1;
	}

	if (new_state == OCRDMA_QPS_INIT) {
		ocrdma_init_hwq_ptr(qp);
		ocrdma_del_flush_qp(qp);
	} else if (new_state == OCRDMA_QPS_ERR) {
		ocrdma_flush_qp(qp);
	}

	qp->state = new_state;

	spin_unlock_irqrestore(&qp->q_lock, flags);
	return 0;
}

static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
{
	u32 flags = 0;

	if (qp->cap_flags & OCRDMA_QP_INB_RD)
		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_INB_WR)
		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
	if (qp->cap_flags & OCRDMA_QP_LKEY0)
		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
	return flags;
}

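/* Fill the SQ half of the CREATE_QP command: size the WQE array,
 * allocate its DMA memory, and encode page geometry, SGE and WQE
 * limits.
 */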
static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_wqe_allocated;
	u32 max_sges = attrs->cap.max_send_sge;

	/* QP1 may exceed 127 */
	max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
				  dev->attr.max_wqe);

	status = ocrdma_build_q_conf(&max_wqe_allocated,
				     dev->attr.wqe_size, &hw_pages,
				     &hw_page_size);
	if (status) {
		pr_err("%s() req. max_send_wr=0x%x\n", __func__,
		       max_wqe_allocated);
		return -EINVAL;
	}
	qp->sq.max_cnt = max_wqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->sq.va)
		return -ENOMEM;
	qp->sq.len = len;
	qp->sq.pa = pa;
	qp->sq.entry_size = dev->attr.wqe_size;
	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);

	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
			       << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |= (hw_pages <<
				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
				OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
			     OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
	return 0;
}

static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
{
	int status;
	u32 len, hw_pages, hw_page_size;
	dma_addr_t pa = 0;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;

	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
		       attrs->cap.max_recv_wr + 1);
		return status;
	}
	qp->rq.max_cnt = max_rqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!qp->rq.va)
		return -ENOMEM;
	qp->rq.pa = pa;
	qp->rq.len = len;
	qp->rq.entry_size = dev->attr.rqe_size;

	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
			       OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |=
		(hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
		OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
				   OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
			      OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
			     OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
	return 0;
}

static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
					 struct ocrdma_pd *pd,
					 struct ocrdma_qp *qp,
					 u8 enable_dpp_cq, u16 dpp_cq_id)
{
	pd->num_dpp_qp--;
	qp->dpp_enabled = true;
	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
	if (!enable_dpp_cq)
		return;
	cmd->dpp_credits_cqid = dpp_cq_id;
	cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
				 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
}

static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
					struct ocrdma_qp *qp)
{
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	dma_addr_t pa = 0;
	int ird_page_size = dev->attr.ird_page_size;
	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
	struct ocrdma_hdr_wqe *rqe;
	int i;

	if (dev->attr.ird == 0)
		return 0;

	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
					  GFP_KERNEL);
	if (!qp->ird_q_va)
		return -ENOMEM;
	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
			     pa, ird_page_size);
	for (i = 0; i < ird_q_len / dev->attr.rqe_size; i++) {
		rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
						(i * dev->attr.rqe_size));
		rqe->cw = 0;
		rqe->cw |= 2;
		rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
	}
	return 0;
}

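/* Decode the CREATE_QP response: QP and doorbell ids, ORD/IRD limits,
 * DPP offset/credits, and the WQE/RQE counts the firmware actually
 * allocated.
 */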
static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
				     struct ocrdma_qp *qp,
				     struct ib_qp_init_attr *attrs,
				     u16 *dpp_offset, u16 *dpp_credit_lmt)
{
	u32 max_wqe_allocated, max_rqe_allocated;

	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
	qp->dpp_enabled = false;
	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
		qp->dpp_enabled = true;
		*dpp_credit_lmt = (rsp->dpp_response &
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
				  OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
		*dpp_offset = (rsp->dpp_response &
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
			      OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
	}
	max_wqe_allocated =
		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
	max_wqe_allocated = 1 << max_wqe_allocated;
	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

	qp->sq.max_cnt = max_wqe_allocated;
	qp->sq.max_wqe_idx = max_wqe_allocated - 1;

	if (!attrs->srq) {
		qp->rq.max_cnt = max_rqe_allocated;
		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
	}
}

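/* Build and send the CREATE_QP mailbox command: SQ, RQ (or SRQ id),
 * IRD queue, access flags and CQ ids are all encoded here. Queue
 * memory allocated along the way is unwound on failure.
 */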
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt)
{
	int status = -ENOMEM;
	u32 flags = 0;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_cq *cq;
	struct ocrdma_create_qp_req *cmd;
	struct ocrdma_create_qp_rsp *rsp;
	int qptype;

	switch (attrs->qp_type) {
	case IB_QPT_GSI:
		qptype = OCRDMA_QPT_GSI;
		break;
	case IB_QPT_RC:
		qptype = OCRDMA_QPT_RC;
		break;
	case IB_QPT_UD:
		qptype = OCRDMA_QPT_UD;
		break;
	default:
		return -EINVAL;
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
			      OCRDMA_CREATE_QP_REQ_QPT_MASK;
	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);

		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
		qp->srq = srq;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
			      OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
			    OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
			   OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	qp->sq_cq = cq;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
			   OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
	qp->rq_cq = cq;

	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);
	}

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	if (qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	pr_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	pr_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}

int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			struct ocrdma_qp_params *param)
{
	int status = -ENOMEM;
	struct ocrdma_query_qp *cmd;
	struct ocrdma_query_qp_rsp *rsp;

	/* the response is larger than the request; size the embedded
	 * mailbox buffer for the response.
	 */
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_query_qp_rsp *)cmd;
	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
	kfree(cmd);
	return status;
}

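/* Encode address-vector fields (GIDs, MACs, VLAN, traffic class, flow
 * label, SL) into the MODIFY_QP command; for RoCEv2 (IPv4) GIDs the
 * addresses are rewritten in 4-byte form and the L3 type is flagged.
 */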
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs,
				int attr_mask)
{
	int status;
	struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
	const struct ib_gid_attr *sgid_attr;
	u16 vlan_id = 0xFFFF;
	u8 mac_addr[6], hdr_type;
	union {
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	const struct ib_global_route *grh;

	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
		return -EINVAL;
	grh = rdma_ah_read_grh(ah_attr);
	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);
	cmd->params.tclass_sq_psn |=
		(grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
		(grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
				     OCRDMA_QP_PARAMS_SL_SHIFT);
	cmd->params.hop_lmt_rq_psn |=
		(grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;

	/* GIDs */
	memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
	       sizeof(cmd->params.dgid));

	sgid_attr = ah_attr->grh.sgid_attr;
	status = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, &mac_addr[0]);
	if (status)
		return status;

	qp->sgid_idx = grh->sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
	       sizeof(cmd->params.sgid));
	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
	if (status)
		return status;

	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
				    (mac_addr[2] << 16) | (mac_addr[3] << 24);

	hdr_type = rdma_gid_attr_network_type(sgid_attr);
	if (hdr_type == RDMA_NETWORK_IPV4) {
		rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
		memcpy(&cmd->params.dgid[0],
		       &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
		memcpy(&cmd->params.sgid[0],
		       &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
	}
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);

	if (vlan_id == 0xFFFF)
		vlan_id = 0;
	if (vlan_id || dev->pfc_state) {
		if (!vlan_id) {
			pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
			       dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
			       dev->id);
		}
		cmd->params.vlan_dmac_b4_to_b5 |=
			vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
		cmd->params.rnt_rc_sl_fl |=
			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
	}
	cmd->params.max_sge_recv_flags |= ((hdr_type <<
					OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
					OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
	return 0;
}

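/* Translate the ib_qp_attr fields selected by attr_mask into MODIFY_QP
 * parameters, setting a validity flag for each field written.
 */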
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask)
{
	int status = 0;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
					OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV) {
		status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
		if (status)
			return status;
	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
			(dev->nic_info.mac_addr[1] << 8) |
			(dev->nic_info.mac_addr[2] << 16) |
			(dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
			(dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
					OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		if (attrs->path_mtu < IB_MTU_512 ||
		    attrs->path_mtu > IB_MTU_4096) {
			pr_err("ocrdma%d: IB MTU %d is not supported\n",
			       dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
			(ib_mtu_enum_to_int(attrs->path_mtu) <<
			 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
			OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
			OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
					OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
					OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
					OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
					OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
					OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT) &
					OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				   OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				  (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
pmtu_err:
	return status;
}

int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	cmd->flags = 0;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
			(get_ocrdma_qp_state(attrs->qp_state) <<
			 OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else {
		cmd->params.max_sge_recv_flags |=
			(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
			OCRDMA_QP_PARAMS_STATE_MASK;
	}

	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	kfree(cmd);
	if (qp->sq.va)
		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
	return status;
}

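/* Create an SRQ: allocate the RQ buffer, encode page and RQE geometry,
 * then read back the max RQE/SGE values granted by the firmware.
 */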
int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
			  struct ib_srq_init_attr *srq_attr,
			  struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	int hw_pages, hw_page_size;
	int len;
	struct ocrdma_create_srq_rsp *rsp;
	struct ocrdma_create_srq *cmd;
	dma_addr_t pa;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
	max_rqe_allocated = srq_attr->attr.max_wr + 1;
	status = ocrdma_build_q_conf(&max_rqe_allocated,
				     dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_wr=0x%x\n", __func__,
		       srq_attr->attr.max_wr);
		status = -EINVAL;
		goto ret;
	}
	len = hw_pages * hw_page_size;
	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!srq->rq.va) {
		status = -ENOMEM;
		goto ret;
	}
	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);

	srq->rq.entry_size = dev->attr.rqe_size;
	srq->rq.pa = pa;
	srq->rq.len = len;
	srq->rq.max_cnt = max_rqe_allocated;

	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
			    OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;

	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
			   << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
	cmd->pages_rqe_sz |= (dev->attr.rqe_size
			      << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
			     & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_srq_rsp *)cmd;
	srq->id = rsp->id;
	srq->rq.dbid = rsp->id;
	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
			      OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
			     OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
	max_rqe_allocated = (1 << max_rqe_allocated);
	srq->rq.max_cnt = max_rqe_allocated;
	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
			    OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
			   OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
	goto ret;
mbx_err:
	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
ret:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_modify_srq *cmd;
	struct ocrdma_pd *pd = srq->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	cmd->limit_max_rqe |= srq_attr->srq_limit <<
			      OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_query_srq *cmd;
	struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->rq.dbid;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status == 0) {
		struct ocrdma_query_srq_rsp *rsp =
			(struct ocrdma_query_srq_rsp *)cmd;

		srq_attr->max_sge =
			rsp->srq_lmt_max_sge &
			OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
		srq_attr->max_wr =
			rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
			OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
	}
	kfree(cmd);
	return status;
}

void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return;
	cmd->id = srq->id;
	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
}

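/* Fetch the DCBX configuration (admin or operational, per ptype) with
 * a non-embedded mailbox command that uses an external DMA buffer.
 */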
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
				      struct ocrdma_dcbx_cfg *dcbxcfg)
{
	int status;
	dma_addr_t pa;
	struct ocrdma_mqe cmd;

	struct ocrdma_get_dcbx_cfg_req *req = NULL;
	struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;

	memset(&cmd, 0, sizeof(struct ocrdma_mqe));
	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
				 sizeof(struct ocrdma_get_dcbx_cfg_req));
	req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
	if (!req) {
		status = -ENOMEM;
		goto mem_err;
	}

	cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				    OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
	mqe_sge->pa_hi = (u32) upper_32_bits(pa);
	mqe_sge->len = cmd.hdr.pyld_len;

	ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
			OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
	req->param_type = ptype;

	status = ocrdma_mbx_cmd(dev, &cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
	ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
	memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));

mbx_err:
	dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
mem_err:
	return status;
}

#define OCRDMA_MAX_SERVICE_LEVEL_INDEX	0x08
#define OCRDMA_DEFAULT_SERVICE_LEVEL	0x05

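/* Scan the DCBX app-parameter entries for the RoCE ethertype
 * (ETH_P_IBOE) and pick a service level that has both an application
 * priority and a PFC priority configured.
 */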
static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
				    struct ocrdma_dcbx_cfg *dcbxcfg,
				    u8 *srvc_lvl)
{
	int status = -EINVAL, indx, slindx;
	int ventry_cnt;
	struct ocrdma_app_parameter *app_param;
	u8 valid, proto_sel;
	u8 app_prio, pfc_prio;
	u16 proto;

	if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
		pr_info("%s ocrdma%d DCBX is disabled\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
		goto out;
	}

	if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
		pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			(ptype > 0 ? "operational" : "admin"),
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
				"enabled" : "disabled",
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
				"" : ", not sync'ed");
		goto out;
	} else {
		pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	}

	ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
		      OCRDMA_DCBX_APP_ENTRY_SHIFT) & OCRDMA_DCBX_STATE_MASK;

	for (indx = 0; indx < ventry_cnt; indx++) {
		app_param = &dcbxcfg->app_param[indx];
		valid = (app_param->valid_proto_app >>
			 OCRDMA_APP_PARAM_VALID_SHIFT) &
			OCRDMA_APP_PARAM_VALID_MASK;
		proto_sel = (app_param->valid_proto_app >>
			     OCRDMA_APP_PARAM_PROTO_SEL_SHIFT) &
			    OCRDMA_APP_PARAM_PROTO_SEL_MASK;
		proto = app_param->valid_proto_app &
			OCRDMA_APP_PARAM_APP_PROTO_MASK;

		if (valid && proto == ETH_P_IBOE &&
		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
			for (slindx = 0; slindx <
			     OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
				app_prio = ocrdma_get_app_prio(
						(u8 *)app_param->app_prio,
						slindx);
				pfc_prio = ocrdma_get_pfc_prio(
						(u8 *)dcbxcfg->pfc_prio,
						slindx);

				if (app_prio && pfc_prio) {
					*srvc_lvl = slindx;
					status = 0;
					goto out;
				}
			}
			if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
				pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
					dev_name(&dev->nic_info.pdev->dev),
					dev->id, proto);
			}
		}
	}
out:
	return status;
}

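/* Select the device service level: try the operational DCBX parameters
 * first, fall back to the admin set, then to the default SL.
 */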
void ocrdma_init_service_level(struct ocrdma_dev *dev)
{
	int status = 0, indx;
	struct ocrdma_dcbx_cfg dcbxcfg;
	u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
	int ptype = OCRDMA_PARAMETER_TYPE_OPER;

	for (indx = 0; indx < 2; indx++) {
		status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
		if (status) {
			pr_err("%s(): status=%d\n", __func__, status);
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
			continue;
		}

		status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
						  &dcbxcfg, &srvc_lvl);
		if (status) {
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
			continue;
		}

		break;
	}

	if (status)
		pr_info("%s ocrdma%d service level default\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	else
		pr_info("%s ocrdma%d service level %d\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			srvc_lvl);

	dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
	dev->sl = srvc_lvl;
}

int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	int i;
	int status = -EINVAL;
	struct ocrdma_av *av;
	unsigned long flags;

	av = dev->av_tbl.va;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	for (i = 0; i < dev->av_tbl.num_ah; i++) {
		if (av->valid == 0) {
			av->valid = OCRDMA_AV_VALID;
			ah->av = av;
			ah->id = i;
			status = 0;
			break;
		}
		av++;
	}
	if (i == dev->av_tbl.num_ah)
		status = -EAGAIN;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return status;
}

void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
}

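/* Create one EQ per available MSI-X vector (capped at the number of
 * online CPUs), or a single shared-IRQ EQ in INTx mode, and register
 * the interrupt handlers.
 */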
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
		 dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}

	if (!num_eq)
		return -EINVAL;

	dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
	if (!dev->eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status) {
			status = -EINVAL;
			break;
		}
		sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->eq_tbl[i].irq_name,
				     &dev->eq_tbl[i]);
		if (status)
			goto done;
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	return 0;
done:
	ocrdma_destroy_eqs(dev);
	return status;
}

static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
				 int num)
{
	int i, status;
	struct ocrdma_modify_eqd_req *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->cmd.num_eq = num;
	for (i = 0; i < num; i++) {
		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
		cmd->cmd.set_eqd[i].phase = 0;
		cmd->cmd.set_eqd[i].delay_multiplier =
			(eq[i].aic_obj.prev_eqd * 65) / 100;
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	kfree(cmd);
	return status;
}

static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
			     int num)
{
	int num_eqs, i = 0;

	if (num > 8) {
		while (num) {
			num_eqs = min(num, 8);
			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
	} else {
		ocrdma_mbx_modify_eqd(dev, eq, num);
	}
	return 0;
}

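/* Periodic (1s) adaptive interrupt-coalescing worker: raise an EQ's
 * delay multiplier to the max on a high interrupt rate, drop it to the
 * min on a low rate, and push any changes via MODIFY_EQ_DELAY.
 */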
void ocrdma_eqd_set_task(struct work_struct *work)
{
	struct ocrdma_dev *dev =
		container_of(work, struct ocrdma_dev, eqd_work.work);
	struct ocrdma_eq *eq = NULL;
	int i, num = 0;
	u64 eq_intr;

	for (i = 0; i < dev->eq_cnt; i++) {
		eq = &dev->eq_tbl[i];
		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
			eq_intr = eq->aic_obj.eq_intr_cnt -
				  eq->aic_obj.prev_eq_intr_cnt;
			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
				num++;
			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
				num++;
			}
		}
		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
	}

	if (num)
		ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}

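/* Bring-up sequence for the RoCE function: EQs, MQ, firmware and device
 * queries, AH table, PHY and controller attributes, with full unwind on
 * failure.
 */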
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;

	/* create the eqs */
	status = ocrdma_create_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_get_phy_info(dev);
	if (status)
		goto info_attrb_err;
	status = ocrdma_mbx_get_ctrl_attribs(dev);
	if (status)
		goto info_attrb_err;

	return 0;

info_attrb_err:
	ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_eqs(dev);
qpeq_err:
	pr_err("%s() status=%d\n", __func__, status);
	return status;
}

void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_free_pd_pool(dev);
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);

	/* cleanup the eqs */
	ocrdma_destroy_eqs(dev);
}