/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % rcfw->cmdq_depth;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}
/* Poll for the command completion; used where the caller cannot sleep */
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % rcfw->cmdq_depth;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}
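
/*
 * Illustrative example: with a CMDQ depth of 256, cookie 0x1ff maps to
 * completion bit 0x1ff % 256 = 255. __send_message() sets that bit in
 * cmdq_bitmap, and __wait_for_resp()/__block_for_resp() sleep or poll
 * until the CREQ handler clears it.
 */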
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	u32 cmdq_depth = rcfw->cmdq_depth;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"RCFW not initialized, reject opcode 0x%x\n", opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % rcfw->cmdq_depth;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}

	size = req->cmd_size;
	/* change the cmd_size to the number of 16byte cmdq unit.
	 * req->cmd_size is modified here
	 */
	bnxt_qplib_set_cmd_slots(req);

	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
				[get_cmdq_idx(sw_prod, cmdq_depth)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"RCFW request failed with no cmdqe!\n");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}
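
/*
 * Worked example (illustrative): a cmdqe slot is BNXT_QPLIB_CMDQE_UNITS
 * (16) bytes, so a 48-byte command occupies 48 / 16 = 3 slots; the copy
 * loop above emits it 16 bytes at a time, bumping cmdq->prod per slot.
 */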
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
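
/*
 * Usage sketch (illustrative; mirrors bnxt_qplib_get_version() in
 * qplib_sp.c): callers stack-allocate a request/response pair, prep the
 * opcode, and issue the command synchronously.
 *
 *	struct cmdq_query_version req;
 *	struct creq_query_version_resp resp;
 *	u16 cmd_flags = 0;
 *
 *	RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
 *					  NULL, 0);
 */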
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"Received QP error notification\n");
		dev_dbg(&rcfw->pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 */
		spin_lock_irqsave_nested(&cmdq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % rcfw->cmdq_depth;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			if (crsqe->resp && crsqe->resp->cookie)
				dev_err(&rcfw->pdev->dev,
					"CMD %s cookie sent=%#x, recd=%#x\n",
					crsqe->resp ? "mismatch" : "collision",
					crsqe->resp ? crsqe->resp->cookie : 0,
					mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "CMD bit %d was not requested\n", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
					      raw_cons, creq->max_elements,
					      rcfw->creq_ring_id, gen_p5);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
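
/*
 * The hard-IRQ handler below is deliberately minimal: it only prefetches
 * the next CREQ entry and schedules the tasklet; all event processing
 * happens in bnxt_qplib_service_creq() above.
 */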
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}
/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
		pbl->pg_size == ROCE_PG_SIZE_8K ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
		pbl->pg_size == ROCE_PG_SIZE_64K ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
		pbl->pg_size == ROCE_PG_SIZE_2M ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
		pbl->pg_size == ROCE_PG_SIZE_8M ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
		pbl->pg_size == ROCE_PG_SIZE_1G ?
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
				      CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
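
/*
 * Example (illustrative): a context table backed by 2M PBL pages reports
 * pg_size == ROCE_PG_SIZE_2M, which the chain above encodes as
 * CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M; unrecognized sizes fall back to 4K.
 */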
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Gen P5 devices don't require this allocation
	 * as the L2 driver does the same for RoCE also.
	 * Also, VFs need not set up the HW context area; the PF
	 * sets up this area for the VF. Skip the HW programming.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;
	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
		goto config_vf_res;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

config_vf_res:
	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}
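
/*
 * Worked example (illustrative, assuming RCFW_DBR_BASE_PAGE_SHIFT is 12):
 * on a 64K-page host, bnxt_qplib_init_rcfw() programs log2_dbr_pg_size =
 * 16 - 12 = 4, i.e. each doorbell page is 2^4 times the 4K base size.
 */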
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz)
{
	u8 hwq_type;

	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS,
				      0, PAGE_SIZE, hwq_type)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}
	if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK)
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256;
	else
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;

	rcfw->cmdq.max_elements = rcfw->cmdq_depth;
	if (bnxt_qplib_alloc_init_hwq
			(rcfw->pdev, &rcfw->cmdq, NULL,
			 &rcfw->cmdq.max_elements,
			 BNXT_QPLIB_CMDQE_UNITS, 0,
			 bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
			 HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);

	tasklet_disable(&rcfw->worker);
	/* Mask h/w interrupts */
	bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
				rcfw->creq.max_elements, rcfw->creq_ring_id,
				gen_p5);
	/* Sync with last running IRQ-handler */
	synchronize_irq(rcfw->vector);
	if (kill)
		tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
}
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(rcfw->cmdq_bar_reg_iomem);
	iounmap(rcfw->creq_bar_reg_iomem);

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"disabling RCFW with pending cmd-bit %lx\n", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->cmdq_bar_reg_iomem = NULL;
	rcfw->creq_bar_reg_iomem = NULL;
	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
	int rc;

	if (rcfw->requested)
		return -EFAULT;

	rcfw->vector = msix_vector;
	if (need_init)
		tasklet_init(&rcfw->worker,
			     bnxt_qplib_service_creq, (unsigned long)rcfw);
	else
		tasklet_enable(&rcfw->worker);
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	rcfw->requested = true;
	bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
				      rcfw->creq.cons, rcfw->creq.max_elements,
				      rcfw->creq_ring_id, gen_p5);

	return 0;
}
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
					   RCFW_COMM_BASE_OFFSET,
					   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"CREQ BAR region %d resc start is 0!\n",
			rcfw->creq_bar_reg);
	/* Unconditionally map 8 bytes to support 57500 series */
	rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
					   8);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
			rcfw->creq_bar_reg);
		iounmap(rcfw->cmdq_bar_reg_iomem);
		rcfw->cmdq_bar_reg_iomem = NULL;
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->aeq_handler = aeq_handler;
	init_waitqueue_head(&rcfw->waitq);

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
				      &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}
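
/*
 * Usage sketch (illustrative; mirrors bnxt_qplib_get_dev_attr() in
 * qplib_sp.c): commands that return data in a side buffer allocate an
 * sbuf, pass it as the sb argument, and free it once the response has
 * been consumed.
 *
 *	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sbuf_size);
 *	if (!sbuf)
 *		return -ENOMEM;
 *	req.resp_size = sbuf->size / BNXT_QPLIB_CMDQE_UNITS;
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
 *					  (void *)sbuf, 0);
 *	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 */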