2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: RDMA Controller HW interface
38 #include <linux/interrupt.h>
39 #include <linux/spinlock.h>
40 #include <linux/pci.h>
41 #include <linux/prefetch.h>
42 #include <linux/delay.h>
45 #include "qplib_res.h"
46 #include "qplib_rcfw.h"
50 static void bnxt_qplib_service_creq(unsigned long data
);
52 /* Hardware communication channel */
53 static int __wait_for_resp(struct bnxt_qplib_rcfw
*rcfw
, u16 cookie
)
58 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
59 rc
= wait_event_timeout(rcfw
->waitq
,
60 !test_bit(cbit
, rcfw
->cmdq_bitmap
),
61 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS
));
62 return rc
? 0 : -ETIMEDOUT
;
65 static int __block_for_resp(struct bnxt_qplib_rcfw
*rcfw
, u16 cookie
)
67 u32 count
= RCFW_BLOCKED_CMD_WAIT_COUNT
;
70 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
71 if (!test_bit(cbit
, rcfw
->cmdq_bitmap
))
74 mdelay(1); /* 1m sec */
75 bnxt_qplib_service_creq((unsigned long)rcfw
);
76 } while (test_bit(cbit
, rcfw
->cmdq_bitmap
) && --count
);
78 return count
? 0 : -ETIMEDOUT
;
/*
 * __send_message() - post one RCFW command onto the CMDQ and ring the doorbell.
 *
 * NOTE(review): this copy of the file is extraction-damaged - physical lines
 * are fragmented and several original lines (returns, the copy loop header,
 * some local declarations such as 'opcode' and 'preq') are missing. Comments
 * below describe only what the visible code shows; restore from the pristine
 * source before compiling.
 *
 * Visible flow: validate firmware state against the opcode, take cmdq->lock,
 * reserve a cookie/bitmap slot, record the response buffer in crsqe_tbl,
 * copy the request into CMDQ slots in sizeof(*cmdqe) segments, then write
 * the producer index and trigger registers.
 */
81 static int __send_message(struct bnxt_qplib_rcfw
*rcfw
, struct cmdq_base
*req
,
82 struct creq_base
*resp
, void *sb
, u8 is_block
)
84 struct bnxt_qplib_cmdqe
*cmdqe
, **cmdq_ptr
;
85 struct bnxt_qplib_hwq
*cmdq
= &rcfw
->cmdq
;
86 struct bnxt_qplib_crsq
*crsqe
;
87 u32 sw_prod
, cmdq_prod
;
/* Only QUERY_FUNC/INITIALIZE_FW/QUERY_VERSION are allowed before FW init */
94 if (!test_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
) &&
95 (opcode
!= CMDQ_BASE_OPCODE_QUERY_FUNC
&&
96 opcode
!= CMDQ_BASE_OPCODE_INITIALIZE_FW
&&
97 opcode
!= CMDQ_BASE_OPCODE_QUERY_VERSION
)) {
98 dev_err(&rcfw
->pdev
->dev
,
99 "QPLIB: RCFW not initialized, reject opcode 0x%x",
/* Reject a second INITIALIZE_FW once the firmware is already up */
104 if (test_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
) &&
105 opcode
== CMDQ_BASE_OPCODE_INITIALIZE_FW
) {
106 dev_err(&rcfw
->pdev
->dev
, "QPLIB: RCFW already initialized!");
/* A previous command timed out - firmware channel considered wedged */
110 if (test_bit(FIRMWARE_TIMED_OUT
, &rcfw
->flags
))
113 /* Cmdq are in 16-byte units, each request can consume 1 or more
116 spin_lock_irqsave(&cmdq
->lock
, flags
);
/* Reject if the CMDQ cannot hold the whole multi-slot request */
117 if (req
->cmd_size
>= HWQ_FREE_SLOTS(cmdq
)) {
118 dev_err(&rcfw
->pdev
->dev
, "QPLIB: RCFW: CMDQ is full!");
119 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
/* Derive cookie from the sequence number; cbit indexes cmdq_bitmap */
124 cookie
= rcfw
->seq_num
& RCFW_MAX_COOKIE_VALUE
;
125 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
/* Blocking callers are tagged in the cookie so the completion side knows */
127 cookie
|= RCFW_CMD_IS_BLOCKING
;
129 set_bit(cbit
, rcfw
->cmdq_bitmap
);
130 req
->cookie
= cpu_to_le16(cookie
);
131 crsqe
= &rcfw
->crsqe_tbl
[cbit
];
133 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
/* Stash the caller's response buffer where the CREQ handler can find it */
136 memset(resp
, 0, sizeof(*resp
));
137 crsqe
->resp
= (struct creq_qp_event
*)resp
;
138 crsqe
->resp
->cookie
= req
->cookie
;
139 crsqe
->req_size
= req
->cmd_size
;
/* Optional side-buffer for commands with a DMA-able response area */
140 if (req
->resp_size
&& sb
) {
141 struct bnxt_qplib_rcfw_sbuf
*sbuf
= sb
;
143 req
->resp_addr
= cpu_to_le64(sbuf
->dma_addr
);
/* resp_size is expressed in CMDQE units, rounded up */
144 req
->resp_size
= (sbuf
->size
+ BNXT_QPLIB_CMDQE_UNITS
- 1) /
145 BNXT_QPLIB_CMDQE_UNITS
;
148 cmdq_ptr
= (struct bnxt_qplib_cmdqe
**)cmdq
->pbl_ptr
;
/* Total request size in bytes */
150 size
= req
->cmd_size
* BNXT_QPLIB_CMDQE_UNITS
;
152 /* Locate the next cmdq slot */
153 sw_prod
= HWQ_CMP(cmdq
->prod
, cmdq
);
154 cmdqe
= &cmdq_ptr
[get_cmdq_pg(sw_prod
)][get_cmdq_idx(sw_prod
)];
156 dev_err(&rcfw
->pdev
->dev
,
157 "QPLIB: RCFW request failed with no cmdqe!");
160 /* Copy a segment of the req cmd to the cmdq */
161 memset(cmdqe
, 0, sizeof(*cmdqe
));
162 memcpy(cmdqe
, preq
, min_t(u32
, size
, sizeof(*cmdqe
)));
163 preq
+= min_t(u32
, size
, sizeof(*cmdqe
));
164 size
-= min_t(u32
, size
, sizeof(*cmdqe
));
/* Publish the new producer index to the firmware */
171 cmdq_prod
= cmdq
->prod
;
172 if (test_bit(FIRMWARE_FIRST_FLAG
, &rcfw
->flags
)) {
173 /* The very first doorbell write
174 * is required to set this flag
175 * which prompts the FW to reset
176 * its internal pointers
178 cmdq_prod
|= BIT(FIRMWARE_FIRST_FLAG
);
179 clear_bit(FIRMWARE_FIRST_FLAG
, &rcfw
->flags
);
/* Doorbell: producer index first, then the trigger register */
184 writel(cmdq_prod
, rcfw
->cmdq_bar_reg_iomem
+
185 rcfw
->cmdq_bar_reg_prod_off
);
186 writel(RCFW_CMDQ_TRIG_VAL
, rcfw
->cmdq_bar_reg_iomem
+
187 rcfw
->cmdq_bar_reg_trig_off
);
189 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
190 /* Return the CREQ response pointer */
/*
 * bnxt_qplib_rcfw_send_message() - public entry: send a command and wait.
 *
 * NOTE(review): extraction-damaged copy; the retry-loop header, rc checks
 * and returns are among the missing lines. Visible flow: retry
 * __send_message() (up to retry_cnt times on -EAGAIN/-EBUSY, sleeping or
 * mdelay'ing between tries depending on is_block), then wait for the
 * response via __block_for_resp() or __wait_for_resp(), flagging
 * FIRMWARE_TIMED_OUT on timeout and logging a non-zero event status.
 */
194 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw
*rcfw
,
195 struct cmdq_base
*req
,
196 struct creq_base
*resp
,
197 void *sb
, u8 is_block
)
/* The CREQ handler overwrites *resp, viewed here as a creq_qp_event */
199 struct creq_qp_event
*evnt
= (struct creq_qp_event
*)resp
;
201 u8 opcode
, retry_cnt
= 0xFF;
205 opcode
= req
->opcode
;
206 rc
= __send_message(rcfw
, req
, resp
, sb
, is_block
);
207 cookie
= le16_to_cpu(req
->cookie
) & RCFW_MAX_COOKIE_VALUE
;
/* Give up when retries are exhausted or the error is not transient */
211 if (!retry_cnt
|| (rc
!= -EAGAIN
&& rc
!= -EBUSY
)) {
213 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x send failed",
/* Blocking context cannot sleep, so busy-wait; otherwise usleep */
217 is_block
? mdelay(1) : usleep_range(500, 1000);
219 } while (retry_cnt
--);
/* Wait for the completion: poll when blocking, sleep otherwise */
222 rc
= __block_for_resp(rcfw
, cookie
);
224 rc
= __wait_for_resp(rcfw
, cookie
);
/* Timed out: mark the channel wedged so later commands are rejected */
227 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
228 cookie
, opcode
, RCFW_CMD_WAIT_TIME_MS
);
229 set_bit(FIRMWARE_TIMED_OUT
, &rcfw
->flags
);
234 /* failed with status */
235 dev_err(&rcfw
->pdev
->dev
, "QPLIB: cmdq[%#x]=%#x status %#x",
236 cookie
, opcode
, evnt
->status
);
/*
 * bnxt_qplib_process_func_event() - dispatch a function-level async event
 * from the CREQ.
 *
 * NOTE(review): the per-case bodies (and break/return statements) are among
 * the lines lost to extraction damage - only the case labels survive here.
 * Restore from the pristine source before compiling.
 */
243 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw
*rcfw
,
244 struct creq_func_event
*func_event
)
246 switch (func_event
->event
) {
247 case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR
:
249 case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR
:
251 case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR
:
253 case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR
:
255 case CREQ_FUNC_EVENT_EVENT_CQ_ERROR
:
257 case CREQ_FUNC_EVENT_EVENT_TQM_ERROR
:
259 case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR
:
261 case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR
:
262 /* SRQ ctx error, call srq_handler??
263 * But there's no SRQ handle!
266 case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR
:
268 case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR
:
270 case CREQ_FUNC_EVENT_EVENT_TIM_ERROR
:
272 case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST
:
274 case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
:
/*
 * bnxt_qplib_process_qp_event() - handle a QP-related CREQ entry: either an
 * asynchronous QP error notification or the completion of an outstanding
 * RCFW command (matched via the cookie reserved in __send_message()).
 *
 * NOTE(review): extraction-damaged copy; the default-case header and several
 * control lines are missing. Comments describe only the visible code.
 */
282 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw
*rcfw
,
283 struct creq_qp_event
*qp_event
)
285 struct bnxt_qplib_hwq
*cmdq
= &rcfw
->cmdq
;
286 struct creq_qp_error_notification
*err_event
;
287 struct bnxt_qplib_crsq
*crsqe
;
289 struct bnxt_qplib_qp
*qp
;
290 u16 cbit
, blocked
= 0;
295 switch (qp_event
->event
) {
296 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION
:
/* Async QP error: look up the QP by its firmware xid */
297 err_event
= (struct creq_qp_error_notification
*)qp_event
;
298 qp_id
= le32_to_cpu(err_event
->xid
);
299 qp
= rcfw
->qp_tbl
[qp_id
].qp_handle
;
300 dev_dbg(&rcfw
->pdev
->dev
,
301 "QPLIB: Received QP error notification");
302 dev_dbg(&rcfw
->pdev
->dev
,
303 "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
304 qp_id
, err_event
->req_err_state_reason
,
305 err_event
->res_err_state_reason
);
/* Flag the QP errored under its CQ locks so pollers see it atomically */
308 bnxt_qplib_acquire_cq_locks(qp
, &flags
);
309 bnxt_qplib_mark_qp_error(qp
);
310 bnxt_qplib_release_cq_locks(qp
, &flags
);
313 /* Command Response */
314 spin_lock_irqsave(&cmdq
->lock
, flags
);
/* Recover the slot: strip the blocking flag, index crsqe_tbl by cbit */
315 cookie
= le16_to_cpu(qp_event
->cookie
);
316 mcookie
= qp_event
->cookie
;
317 blocked
= cookie
& RCFW_CMD_IS_BLOCKING
;
318 cookie
&= RCFW_MAX_COOKIE_VALUE
;
319 cbit
= cookie
% RCFW_MAX_OUTSTANDING_CMD
;
320 crsqe
= &rcfw
->crsqe_tbl
[cbit
];
/* Cookie matches the waiter's slot: hand the event back to the caller */
322 crsqe
->resp
->cookie
== mcookie
) {
323 memcpy(crsqe
->resp
, qp_event
, sizeof(*qp_event
));
/* Either no waiter (collision) or the wrong waiter (mismatch) */
326 dev_err(&rcfw
->pdev
->dev
,
327 "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
328 crsqe
->resp
? "mismatch" : "collision",
329 crsqe
->resp
? crsqe
->resp
->cookie
: 0, mcookie
);
/* Clearing the bit releases __wait_for_resp()/__block_for_resp() */
331 if (!test_and_clear_bit(cbit
, rcfw
->cmdq_bitmap
))
332 dev_warn(&rcfw
->pdev
->dev
,
333 "QPLIB: CMD bit %d was not requested", cbit
);
/* Retire the consumed CMDQ slots of the completed request */
334 cmdq
->cons
+= crsqe
->req_size
;
338 wake_up(&rcfw
->waitq
);
339 spin_unlock_irqrestore(&cmdq
->lock
, flags
);
344 /* SP - CREQ Completion handlers */
/*
 * bnxt_qplib_service_creq() - tasklet body: drain the CREQ ring.
 *
 * Walks valid CREQ entries (up to CREQ_ENTRY_POLL_BUDGET per invocation),
 * dispatching QP events to bnxt_qplib_process_qp_event() and function events
 * to bnxt_qplib_process_func_event(), then re-arms the CREQ doorbell if the
 * consumer index advanced.
 *
 * NOTE(review): extraction-damaged copy; the budget loop header, the switch
 * statement line and some locals (e.g. 'flags') are among the missing lines.
 */
345 static void bnxt_qplib_service_creq(unsigned long data
)
347 struct bnxt_qplib_rcfw
*rcfw
= (struct bnxt_qplib_rcfw
*)data
;
348 struct bnxt_qplib_hwq
*creq
= &rcfw
->creq
;
349 struct creq_base
*creqe
, **creq_ptr
;
350 u32 sw_cons
, raw_cons
;
352 u32 type
, budget
= CREQ_ENTRY_POLL_BUDGET
;
354 /* Service the CREQ until budget is over */
355 spin_lock_irqsave(&creq
->lock
, flags
);
356 raw_cons
= creq
->cons
;
358 sw_cons
= HWQ_CMP(raw_cons
, creq
);
359 creq_ptr
= (struct creq_base
**)creq
->pbl_ptr
;
360 creqe
= &creq_ptr
[get_creq_pg(sw_cons
)][get_creq_idx(sw_cons
)];
/* Stop at the first entry whose valid bit doesn't match this pass */
361 if (!CREQ_CMP_VALID(creqe
, raw_cons
, creq
->max_elements
))
363 /* The valid test of the entry must be done first before
364 * reading any further.
368 type
= creqe
->type
& CREQ_BASE_TYPE_MASK
;
370 case CREQ_BASE_TYPE_QP_EVENT
:
371 bnxt_qplib_process_qp_event
372 (rcfw
, (struct creq_qp_event
*)creqe
);
373 rcfw
->creq_qp_event_processed
++;
375 case CREQ_BASE_TYPE_FUNC_EVENT
:
376 if (!bnxt_qplib_process_func_event
377 (rcfw
, (struct creq_func_event
*)creqe
))
378 rcfw
->creq_func_event_processed
++;
381 (&rcfw
->pdev
->dev
, "QPLIB:aeqe:%#x Not handled",
385 dev_warn(&rcfw
->pdev
->dev
, "QPLIB: creqe with ");
386 dev_warn(&rcfw
->pdev
->dev
,
387 "QPLIB: op_event = 0x%x not handled", type
);
/* Only touch the doorbell when we actually consumed entries */
394 if (creq
->cons
!= raw_cons
) {
395 creq
->cons
= raw_cons
;
396 CREQ_DB_REARM(rcfw
->creq_bar_reg_iomem
, raw_cons
,
399 spin_unlock_irqrestore(&creq
->lock
, flags
);
402 static irqreturn_t
bnxt_qplib_creq_irq(int irq
, void *dev_instance
)
404 struct bnxt_qplib_rcfw
*rcfw
= dev_instance
;
405 struct bnxt_qplib_hwq
*creq
= &rcfw
->creq
;
406 struct creq_base
**creq_ptr
;
409 /* Prefetch the CREQ element */
410 sw_cons
= HWQ_CMP(creq
->cons
, creq
);
411 creq_ptr
= (struct creq_base
**)rcfw
->creq
.pbl_ptr
;
412 prefetch(&creq_ptr
[get_creq_pg(sw_cons
)][get_creq_idx(sw_cons
)]);
414 tasklet_schedule(&rcfw
->worker
);
420 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw
*rcfw
)
422 struct cmdq_deinitialize_fw req
;
423 struct creq_deinitialize_fw_resp resp
;
427 RCFW_CMD_PREP(req
, DEINITIALIZE_FW
, cmd_flags
);
428 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
433 clear_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
);
437 static int __get_pbl_pg_idx(struct bnxt_qplib_pbl
*pbl
)
439 return (pbl
->pg_size
== ROCE_PG_SIZE_4K
?
440 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K
:
441 pbl
->pg_size
== ROCE_PG_SIZE_8K
?
442 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K
:
443 pbl
->pg_size
== ROCE_PG_SIZE_64K
?
444 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K
:
445 pbl
->pg_size
== ROCE_PG_SIZE_2M
?
446 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M
:
447 pbl
->pg_size
== ROCE_PG_SIZE_8M
?
448 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M
:
449 pbl
->pg_size
== ROCE_PG_SIZE_1G
?
450 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G
:
451 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K
);
/*
 * bnxt_qplib_init_rcfw() - build and send the INITIALIZE_FW command that
 * hands the firmware the backing-store context tables (QPC/MRW/SRQ/CQ/TIM/
 * TQM page directories, levels and sizes) plus per-VF resource limits.
 *
 * NOTE(review): extraction-damaged copy; the left-hand sides of the page-dir
 * assignments (req.qpc_page_dir = ... etc.) and the VF skip branch are among
 * the missing lines. Comments describe only the visible code.
 */
454 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw
*rcfw
,
455 struct bnxt_qplib_ctx
*ctx
, int is_virtfn
)
457 struct cmdq_initialize_fw req
;
458 struct creq_initialize_fw_resp resp
;
459 u16 cmd_flags
= 0, level
;
462 RCFW_CMD_PREP(req
, INITIALIZE_FW
, cmd_flags
);
465 * VFs need not setup the HW context area, PF
466 * shall setup this area for VF. Skipping the
/* Encode each table's PBL level and page size for the firmware */
472 level
= ctx
->qpc_tbl
.level
;
473 req
.qpc_pg_size_qpc_lvl
= (level
<< CMDQ_INITIALIZE_FW_QPC_LVL_SFT
) |
474 __get_pbl_pg_idx(&ctx
->qpc_tbl
.pbl
[level
]);
475 level
= ctx
->mrw_tbl
.level
;
476 req
.mrw_pg_size_mrw_lvl
= (level
<< CMDQ_INITIALIZE_FW_MRW_LVL_SFT
) |
477 __get_pbl_pg_idx(&ctx
->mrw_tbl
.pbl
[level
]);
478 level
= ctx
->srqc_tbl
.level
;
479 req
.srq_pg_size_srq_lvl
= (level
<< CMDQ_INITIALIZE_FW_SRQ_LVL_SFT
) |
480 __get_pbl_pg_idx(&ctx
->srqc_tbl
.pbl
[level
]);
481 level
= ctx
->cq_tbl
.level
;
482 req
.cq_pg_size_cq_lvl
= (level
<< CMDQ_INITIALIZE_FW_CQ_LVL_SFT
) |
483 __get_pbl_pg_idx(&ctx
->cq_tbl
.pbl
[level
]);
/* NOTE(review): the srq/cq encodings below repeat the assignments just
 * above with identical values - looks like redundant duplication; confirm
 * against the pristine source before removing.
 */
484 level
= ctx
->srqc_tbl
.level
;
485 req
.srq_pg_size_srq_lvl
= (level
<< CMDQ_INITIALIZE_FW_SRQ_LVL_SFT
) |
486 __get_pbl_pg_idx(&ctx
->srqc_tbl
.pbl
[level
]);
487 level
= ctx
->cq_tbl
.level
;
488 req
.cq_pg_size_cq_lvl
= (level
<< CMDQ_INITIALIZE_FW_CQ_LVL_SFT
) |
489 __get_pbl_pg_idx(&ctx
->cq_tbl
.pbl
[level
]);
490 level
= ctx
->tim_tbl
.level
;
491 req
.tim_pg_size_tim_lvl
= (level
<< CMDQ_INITIALIZE_FW_TIM_LVL_SFT
) |
492 __get_pbl_pg_idx(&ctx
->tim_tbl
.pbl
[level
]);
493 level
= ctx
->tqm_pde_level
;
494 req
.tqm_pg_size_tqm_lvl
= (level
<< CMDQ_INITIALIZE_FW_TQM_LVL_SFT
) |
495 __get_pbl_pg_idx(&ctx
->tqm_pde
.pbl
[level
]);
/* Root page-directory DMA addresses of each context table (the
 * 'req.<tbl>_page_dir =' left-hand sides are among the missing lines).
 */
498 cpu_to_le64(ctx
->qpc_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
500 cpu_to_le64(ctx
->mrw_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
502 cpu_to_le64(ctx
->srqc_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
504 cpu_to_le64(ctx
->cq_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
506 cpu_to_le64(ctx
->tim_tbl
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
508 cpu_to_le64(ctx
->tqm_pde
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
/* Capacity of each context table */
510 req
.number_of_qp
= cpu_to_le32(ctx
->qpc_tbl
.max_elements
);
511 req
.number_of_mrw
= cpu_to_le32(ctx
->mrw_tbl
.max_elements
);
512 req
.number_of_srq
= cpu_to_le32(ctx
->srqc_tbl
.max_elements
);
513 req
.number_of_cq
= cpu_to_le32(ctx
->cq_tbl
.max_elements
);
/* Per-VF resource caps */
515 req
.max_qp_per_vf
= cpu_to_le32(ctx
->vf_res
.max_qp_per_vf
);
516 req
.max_mrw_per_vf
= cpu_to_le32(ctx
->vf_res
.max_mrw_per_vf
);
517 req
.max_srq_per_vf
= cpu_to_le32(ctx
->vf_res
.max_srq_per_vf
);
518 req
.max_cq_per_vf
= cpu_to_le32(ctx
->vf_res
.max_cq_per_vf
);
519 req
.max_gid_per_vf
= cpu_to_le32(ctx
->vf_res
.max_gid_per_vf
);
522 req
.stat_ctx_id
= cpu_to_le32(ctx
->stats
.fw_id
);
523 rc
= bnxt_qplib_rcfw_send_message(rcfw
, (void *)&req
, (void *)&resp
,
/* Success: unlock the full command set in __send_message() */
527 set_bit(FIRMWARE_INITIALIZED_FLAG
, &rcfw
->flags
);
531 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw
*rcfw
)
534 kfree(rcfw
->crsqe_tbl
);
535 bnxt_qplib_free_hwq(rcfw
->pdev
, &rcfw
->cmdq
);
536 bnxt_qplib_free_hwq(rcfw
->pdev
, &rcfw
->creq
);
/*
 * bnxt_qplib_alloc_rcfw_channel() - allocate the RCFW rings and tables:
 * CREQ HWQ, CMDQ HWQ, the crsqe (command-response) table sized to the CMDQ,
 * and the QP lookup table.
 *
 * NOTE(review): extraction-damaged copy; the signature tail (qp_tbl_sz
 * parameter), return statements and goto-cleanup lines are among the
 * missing lines. The failure path visibly unwinds through
 * bnxt_qplib_free_rcfw_channel().
 */
540 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev
*pdev
,
541 struct bnxt_qplib_rcfw
*rcfw
,
545 rcfw
->creq
.max_elements
= BNXT_QPLIB_CREQE_MAX_CNT
;
546 if (bnxt_qplib_alloc_init_hwq(rcfw
->pdev
, &rcfw
->creq
, NULL
, 0,
547 &rcfw
->creq
.max_elements
,
548 BNXT_QPLIB_CREQE_UNITS
, 0, PAGE_SIZE
,
550 dev_err(&rcfw
->pdev
->dev
,
551 "QPLIB: HW channel CREQ allocation failed");
554 rcfw
->cmdq
.max_elements
= BNXT_QPLIB_CMDQE_MAX_CNT
;
555 if (bnxt_qplib_alloc_init_hwq(rcfw
->pdev
, &rcfw
->cmdq
, NULL
, 0,
556 &rcfw
->cmdq
.max_elements
,
557 BNXT_QPLIB_CMDQE_UNITS
, 0, PAGE_SIZE
,
559 dev_err(&rcfw
->pdev
->dev
,
560 "QPLIB: HW channel CMDQ allocation failed");
/* One response slot per possible outstanding CMDQ entry */
564 rcfw
->crsqe_tbl
= kcalloc(rcfw
->cmdq
.max_elements
,
565 sizeof(*rcfw
->crsqe_tbl
), GFP_KERNEL
);
566 if (!rcfw
->crsqe_tbl
)
/* QP id -> handle lookup table used by the QP error notification path */
569 rcfw
->qp_tbl_size
= qp_tbl_sz
;
570 rcfw
->qp_tbl
= kcalloc(qp_tbl_sz
, sizeof(struct bnxt_qplib_qp_node
),
/* Error path: undo all allocations made above */
578 bnxt_qplib_free_rcfw_channel(rcfw
);
582 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw
*rcfw
)
586 /* Make sure the HW channel is stopped! */
587 synchronize_irq(rcfw
->vector
);
588 tasklet_disable(&rcfw
->worker
);
589 tasklet_kill(&rcfw
->worker
);
591 if (rcfw
->requested
) {
592 free_irq(rcfw
->vector
, rcfw
);
593 rcfw
->requested
= false;
595 if (rcfw
->cmdq_bar_reg_iomem
)
596 iounmap(rcfw
->cmdq_bar_reg_iomem
);
597 rcfw
->cmdq_bar_reg_iomem
= NULL
;
599 if (rcfw
->creq_bar_reg_iomem
)
600 iounmap(rcfw
->creq_bar_reg_iomem
);
601 rcfw
->creq_bar_reg_iomem
= NULL
;
603 indx
= find_first_bit(rcfw
->cmdq_bitmap
, rcfw
->bmap_size
);
604 if (indx
!= rcfw
->bmap_size
)
605 dev_err(&rcfw
->pdev
->dev
,
606 "QPLIB: disabling RCFW with pending cmd-bit %lx", indx
);
607 kfree(rcfw
->cmdq_bitmap
);
610 rcfw
->aeq_handler
= NULL
;
/*
 * bnxt_qplib_enable_rcfw_channel() - bring up the HW command channel: map
 * the CMDQ and CREQ BAR regions, allocate the outstanding-command bitmap,
 * hook up the CREQ IRQ + tasklet, arm the CREQ doorbell and write the
 * CMDQ_INIT mailbox so the firmware learns the CMDQ location.
 *
 * NOTE(review): extraction-damaged copy; several declarations (bmap_size,
 * rc, msix_vector parameter) and error-return lines are missing.
 */
614 int bnxt_qplib_enable_rcfw_channel(struct pci_dev
*pdev
,
615 struct bnxt_qplib_rcfw
*rcfw
,
617 int cp_bar_reg_off
, int virt_fn
,
618 int (*aeq_handler
)(struct bnxt_qplib_rcfw
*,
621 resource_size_t res_base
;
622 struct cmdq_init init
;
/* First doorbell after enable must carry the FW-reset hint bit */
628 set_bit(FIRMWARE_FIRST_FLAG
, &rcfw
->flags
);
/* NOTE(review): BITS_TO_LONGS(N * sizeof(unsigned long)) is used as a BYTE
 * count for kzalloc. Numerically it matches BITS_TO_LONGS(N) *
 * sizeof(unsigned long) for power-of-two N, but the intent reads clearer as
 * the latter - confirm against the pristine source before changing.
 */
629 bmap_size
= BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD
*
630 sizeof(unsigned long));
631 rcfw
->cmdq_bitmap
= kzalloc(bmap_size
, GFP_KERNEL
);
632 if (!rcfw
->cmdq_bitmap
)
634 rcfw
->bmap_size
= bmap_size
;
/* Map the CMDQ producer/trigger registers */
637 rcfw
->cmdq_bar_reg
= RCFW_COMM_PCI_BAR_REGION
;
638 res_base
= pci_resource_start(pdev
, rcfw
->cmdq_bar_reg
);
642 rcfw
->cmdq_bar_reg_iomem
= ioremap_nocache(res_base
+
643 RCFW_COMM_BASE_OFFSET
,
645 if (!rcfw
->cmdq_bar_reg_iomem
) {
646 dev_err(&rcfw
->pdev
->dev
,
647 "QPLIB: CMDQ BAR region %d mapping failed",
/* PF and VF expose the producer register at different offsets */
652 rcfw
->cmdq_bar_reg_prod_off
= virt_fn
? RCFW_VF_COMM_PROD_OFFSET
:
653 RCFW_PF_COMM_PROD_OFFSET
;
655 rcfw
->cmdq_bar_reg_trig_off
= RCFW_COMM_TRIG_OFFSET
;
/* Map the CREQ consumer doorbell region */
658 rcfw
->creq_bar_reg
= RCFW_COMM_CONS_PCI_BAR_REGION
;
659 res_base
= pci_resource_start(pdev
, rcfw
->creq_bar_reg
);
661 dev_err(&rcfw
->pdev
->dev
,
662 "QPLIB: CREQ BAR region %d resc start is 0!",
664 rcfw
->creq_bar_reg_iomem
= ioremap_nocache(res_base
+ cp_bar_reg_off
,
666 if (!rcfw
->creq_bar_reg_iomem
) {
667 dev_err(&rcfw
->pdev
->dev
,
668 "QPLIB: CREQ BAR region %d mapping failed",
672 rcfw
->creq_qp_event_processed
= 0;
673 rcfw
->creq_func_event_processed
= 0;
675 rcfw
->vector
= msix_vector
;
677 rcfw
->aeq_handler
= aeq_handler
;
/* Tasklet does the CREQ draining; the IRQ handler just schedules it */
679 tasklet_init(&rcfw
->worker
, bnxt_qplib_service_creq
,
680 (unsigned long)rcfw
);
682 rcfw
->requested
= false;
683 rc
= request_irq(rcfw
->vector
, bnxt_qplib_creq_irq
, 0,
684 "bnxt_qplib_creq", rcfw
);
686 dev_err(&rcfw
->pdev
->dev
,
687 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc
);
/* Unwind everything set up above on IRQ failure */
688 bnxt_qplib_disable_rcfw_channel(rcfw
);
691 rcfw
->requested
= true;
693 init_waitqueue_head(&rcfw
->waitq
);
/* Arm the CREQ doorbell before telling firmware about the CMDQ */
695 CREQ_DB_REARM(rcfw
->creq_bar_reg_iomem
, 0, rcfw
->creq
.max_elements
);
/* Describe the CMDQ (root PBL address, size, level, CREQ ring id) */
697 init
.cmdq_pbl
= cpu_to_le64(rcfw
->cmdq
.pbl
[PBL_LVL_0
].pg_map_arr
[0]);
698 init
.cmdq_size_cmdq_lvl
= cpu_to_le16(
699 ((BNXT_QPLIB_CMDQE_MAX_CNT
<< CMDQ_INIT_CMDQ_SIZE_SFT
) &
700 CMDQ_INIT_CMDQ_SIZE_MASK
) |
701 ((rcfw
->cmdq
.level
<< CMDQ_INIT_CMDQ_LVL_SFT
) &
702 CMDQ_INIT_CMDQ_LVL_MASK
));
703 init
.creq_ring_id
= cpu_to_le16(rcfw
->creq_ring_id
);
705 /* Write to the Bono mailbox register */
706 __iowrite32_copy(rcfw
->cmdq_bar_reg_iomem
, &init
, sizeof(init
) / 4);
/*
 * bnxt_qplib_rcfw_alloc_sbuf() - allocate a side-buffer descriptor plus its
 * DMA-coherent response area for commands that return bulk data.
 *
 * GFP_ATOMIC because callers may hold spinlocks. Ownership transfers to the
 * caller, who must release it via bnxt_qplib_rcfw_free_sbuf().
 *
 * NOTE(review): extraction-damaged copy; the size parameter, the NULL checks
 * after each allocation and the return/cleanup lines are missing here.
 */
710 struct bnxt_qplib_rcfw_sbuf
*bnxt_qplib_rcfw_alloc_sbuf(
711 struct bnxt_qplib_rcfw
*rcfw
,
714 struct bnxt_qplib_rcfw_sbuf
*sbuf
;
716 sbuf
= kzalloc(sizeof(*sbuf
), GFP_ATOMIC
);
/* Zeroed coherent buffer the firmware DMAs the response into */
721 sbuf
->sb
= dma_zalloc_coherent(&rcfw
->pdev
->dev
, sbuf
->size
,
722 &sbuf
->dma_addr
, GFP_ATOMIC
);
732 void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw
*rcfw
,
733 struct bnxt_qplib_rcfw_sbuf
*sbuf
)
736 dma_free_coherent(&rcfw
->pdev
->dev
, sbuf
->size
,
737 sbuf
->sb
, sbuf
->dma_addr
);