/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
/* API common to all protocols */
enum BAR_ID {
        BAR_ID_0,       /* used for GRC */
        BAR_ID_1        /* Used for doorbells */
};
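/* The PGLUE_B per-PF BAR size registers hold an encoded size: a non-zero
 * value 'val' corresponds to (1 << (val + 15)) bytes, i.e. 32kB shifted
 * left by 'val'. A value of zero means the MFW never configured the
 * register, in which case a conservative default is assumed below.
 */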
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
                           enum BAR_ID bar_id)
{
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

        if (val)
                return 1 << (val + 15);

        /* The old MFW initialized the above register only conditionally */
        if (p_hwfn->cdev->num_hwfns > 1) {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
                return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
        } else {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
                return 512 * 1024;
        }
}
void qed_init_dp(struct qed_dev *cdev,
                 u32 dp_module, u8 dp_level)
{
        u32 i;

        cdev->dp_level = dp_level;
        cdev->dp_module = dp_module;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
        }
}
void qed_init_struct(struct qed_dev *cdev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->cdev = cdev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                mutex_init(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        cdev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 */
        cdev->cache_shift = 7;
}
static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;

        kfree(qm_info->qm_pq_params);
        qm_info->qm_pq_params = NULL;
        kfree(qm_info->qm_vport_params);
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
        int i;

        kfree(cdev->fw_data);
        cdev->fw_data = NULL;

        kfree(cdev->reset_stats);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                kfree(p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = NULL;
                kfree(p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = NULL;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
                qed_spq_free(p_hwfn);
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
}
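/* The QM info built below describes this PF's share of the queue manager:
 * one physical queue (PQ) per traffic class plus a pure loopback PQ, a
 * vport array sized by the VPORT resource allocation, and per-port
 * parameters that split the PBF command lines and BTB blocks evenly
 * between the ports of the engine.
 */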
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;

        memset(qm_info, 0, sizeof(*qm_info));

        num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

        /* Sanity checking that setup requires legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when only %04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
                return -EINVAL;
        }

        /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
         */
        qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
                                        num_pqs, GFP_KERNEL);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
                                           num_vports, GFP_KERNEL);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
                                          MAX_NUM_PORTS, GFP_KERNEL);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
                params->wrr_group = 1;
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;

        qm_info->offload_pq = 0;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->cdev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                p_qm_port->num_active_phys_tcs = 4;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }

        qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

        qm_info->vport_rl_en = 1;

        return 0;

alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
        kfree(qm_info->qm_pq_params);
        kfree(qm_info->qm_vport_params);
        kfree(qm_info->qm_port_params);
        return -ENOMEM;
}
int qed_resc_alloc(struct qed_dev *cdev)
{
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
        int i, rc = 0;

        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;

        /* Allocate Memory for the Queue->CID mapping */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                int tx_size = sizeof(struct qed_hw_cid_data) *
                              RESC_NUM(p_hwfn, QED_L2_QUEUE);
                int rx_size = sizeof(struct qed_hw_cid_data) *
                              RESC_NUM(p_hwfn, QED_L2_QUEUE);

                p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Tx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }

                p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Rx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* First allocate the context manager structure */
                rc = qed_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager)
                 * Must be done prior to any further computations.
                 */
                rc = qed_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = qed_init_qm_info(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = qed_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
                 */
                rc = qed_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because initializes SPQ context */
                rc = qed_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
                                                         RESERVED_PTT_DPC);

                rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_eq = p_eq;

                p_consq = qed_consq_alloc(p_hwfn);
                if (!p_consq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for dmae_info structure\n");
                        goto alloc_err;
                }
        }

        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
        if (!cdev->reset_stats) {
                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
                rc = -ENOMEM;
                goto alloc_err;
        }

        return 0;

alloc_err:
        qed_resc_free(cdev);
        return rc;
}
void qed_resc_setup(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_setup(p_hwfn);
                qed_spq_setup(p_hwfn);
                qed_eq_setup(p_hwfn, p_hwfn->p_eq);
                qed_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
                       p_hwfn->mcp_info->mfw_mb_cur,
                       p_hwfn->mcp_info->mfw_mb_length);

                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}
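/* Final cleanup asks firmware to flush any state left behind by a previous
 * driver instance: an SDM aggregated-interrupt command is written to
 * XSDM_REG_OPERATION_GEN and the USTORM "final ack" location is then
 * polled for up to FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms.
 */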
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u16 id)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        int rc = -EBUSY;

        addr = GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

        command |= X_FINAL_CLEANUP_AGG_INT <<
                   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
        command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
        command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
        command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn,
                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, command);

        qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                msleep(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = 0;
        else
                DP_NOTICE(p_hwfn,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
        int hw_mode = 0;

        hw_mode = (1 << MODE_BB_B0);

        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->cdev->mf_mode) {
        case QED_MF_DEFAULT:
        case QED_MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        case QED_MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
                hw_mode |= 1 << MODE_MF_SI;
        }

        hw_mode |= 1 << MODE_ASIC;

        p_hwfn->hw_info.hw_mode = hw_mode;
}
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_igu_info *p_igu_info;
                struct qed_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                        if (!p_block->is_pf)
                                continue;

                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_block->function_id,
                                              0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
                                         sb_entry);
                }
        }
}
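/* HW init is split into phases that mirror the MFW load responses: the
 * ENGINE phase (qed_hw_init_common) runs once per engine, the PORT phase
 * once per port, and the PF/QM_PF phases once per function. Which phases
 * a given hwfn runs is decided in qed_hw_init() by the load code returned
 * from the MCP.
 */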
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              int hw_mode)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
        int rc = 0;

        qed_init_cau_rt_data(cdev);

        /* Program GTT windows */
        qed_gtt_init(p_hwfn);

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        memset(&params, 0, sizeof(params));
        params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.pf_rl_en = qm_info->pf_rl_en;
        params.pf_wfq_en = qm_info->pf_wfq_en;
        params.vport_rl_en = qm_info->vport_rl_en;
        params.vport_wfq_en = qm_info->vport_wfq_en;
        params.port_params = qm_info->qm_port_params;

        qed_qm_common_rt_init(p_hwfn, &params);

        qed_cxt_hw_init_common(p_hwfn);

        /* Close gate from NIG to BRB/Storm; By default they are open, but
         * we close them to prevent NIG from passing data to reset blocks.
         * Should have been done in the ENGINE phase, but init-tool lacks
         * proper port-pretend capabilities.
         */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_unpretend(p_hwfn, p_ptt);

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != 0)
                return rc;

        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        /* Disable relaxed ordering in the PCI config space */
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

        return rc;
}
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            int hw_mode)
{
        int rc = 0;

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                          hw_mode);

        return rc;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
                          bool allow_npar_tx_switch)
{
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        int rc = 0;

        if (p_hwfn->mcp_info) {
                struct qed_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update rate limit once we'll actually have a link */
                p_hwfn->qm_info.pf_rl = 100;
        }

        qed_cxt_hw_init_pf(p_hwfn);

        qed_int_igu_init_rt(p_hwfn);

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }

        /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

        /* Cleanup chip from previous driver if such remains exist */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
        if (rc != 0)
                return rc;

        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        if (b_hw_start) {
                /* enable interrupts */
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
        return rc;
}
static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = qed_rd(p_hwfn, p_ptt,
                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                usleep_range(50, 60);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return -EAGAIN;
        }

        return 0;
}
static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
               p_hwfn->mcp_info->mfw_mb_cur,
               p_hwfn->mcp_info->mfw_mb_length);
}
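/* Per-hwfn init flow: enable DMAE in PXP, compute the hw_mode bitmask,
 * issue LOAD_REQ to the MCP, run the init phases dictated by the returned
 * load code, and finally ACK the MFW with LOAD_DONE regardless of the
 * outcome so the management FW does not stay blocked.
 */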
int qed_hw_init(struct qed_dev *cdev,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
                const u8 *bin_fw_data)
{
        u32 load_code, param;
        int rc, mfw_rc, i;

        rc = qed_init_fw_data(cdev, bin_fw_data);
        if (rc != 0)
                return rc;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                qed_calc_hw_mode(p_hwfn);

                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
                                      &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_PORT:
                        rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                              p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;

                /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
                                            p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "init phase failed for loadcode 0x%x (rc %d)\n",
                                  load_code, rc);

                /* ACK mfw regardless of success or failure of initialization */
                mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                     DRV_MSG_CODE_LOAD_DONE,
                                     0, &load_code, &param);
                if (rc)
                        return rc;
                if (mfw_rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
                        return mfw_rc;
                }

                p_hwfn->hw_init_done = true;
        }

        return 0;
}
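/* Timer block shutdown: connection/task scanning is disabled and the
 * scan-active registers are polled (roughly 1ms per retry) until the
 * linear scans complete or QED_HW_STOP_RETRY_LIMIT retries elapse.
 */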
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
                                      struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt)
{
        int i;

        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

        for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                if ((!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                    (!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_TASK)))
                        break;

                /* Dependent on number of connection/tasks, possibly
                 * 1ms sleep is required between polls
                 */
                usleep_range(1000, 2000);
        }

        if (i < QED_HW_STOP_RETRY_LIMIT)
                return;

        DP_NOTICE(p_hwfn,
                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
        }
}
int qed_hw_stop(struct qed_dev *cdev)
{
        int rc = 0, t_rc;
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;

                rc = qed_sp_pf_stop(p_hwfn);
                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

                /* Disable Attention Generation */
                qed_int_igu_disable_int(p_hwfn, p_ptt);

                qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
                qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }

        /* Disable DMAE in PXP - in CMT, this should only be done for
         * first hw-function, and only after all transactions have
         * stopped for all active hw-functions.
         */
        t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
                                   cdev->hwfns[0].p_main_ptt,
                                   false);
        if (t_rc != 0)
                rc = t_rc;

        return rc;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN,
                           "Shutting down the fastpath\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }
}
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
        /* Re-open incoming traffic */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
static int qed_reg_assert(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt, u32 reg,
                          bool expected)
{
        u32 assert_val = qed_rd(hwfn, ptt, reg);

        if (assert_val != expected) {
                DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
                          reg, expected);
                return -EINVAL;
        }

        return 0;
}
int qed_hw_reset(struct qed_dev *cdev)
{
        int rc = 0;
        u32 unload_resp, unload_param;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

                /* Check for incorrect states */
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_TX, 0);
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_OTHER, 0);

                /* Disable PF in HW blocks */
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       TCFC_REG_STRONG_ENABLE_PF, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       CCFC_REG_STRONG_ENABLE_PF, 0);

                /* Send unload command to MCP */
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_REQ,
                                 DRV_MB_PARAM_UNLOAD_WOL_MCP,
                                 &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
                        unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
                }

                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_DONE,
                                 0, &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
                        return rc;
                }
        }

        return rc;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
        qed_ptt_pool_free(p_hwfn);
        kfree(p_hwfn->hw_info.p_igu_info);
}
/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
        /* clear indirect access */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

        /* Clean Previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
               1 << p_hwfn->abs_pf_id);

        /* enable internal target-read */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
static void get_function_id(struct qed_hwfn *p_hwfn)
{
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

        p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
        p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                      PXP_CONCRETE_FID_PFID);
        p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                    PXP_CONCRETE_FID_PORT);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        int num_features = 1;

        feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
                                        num_features,
                                        RESC_NUM(p_hwfn, QED_L2_QUEUE));
        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
                   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
                   num_features);
}
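/* Resources are divided statically: each PF on the path gets an equal
 * slice of SBs, L2 queues, vports, PQs, MAC/VLAN filters and ILT lines,
 * and its slice starts at resc_num[resource] * rel_pf_id.
 */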
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
        u32 *resc_start = p_hwfn->hw_info.resc_start;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        struct qed_sb_cnt_info sb_cnt_info;
        int num_funcs, i;

        num_funcs = MAX_NUM_PFS_BB;

        memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
        qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

        resc_num[QED_SB] = min_t(u32,
                                 (MAX_SB_PER_PATH_BB / num_funcs),
                                 sb_cnt_info.sb_cnt);
        resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
        resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
        resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
        resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
        resc_num[QED_RL] = 8;
        resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
        resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
                             num_funcs;
        resc_num[QED_ILT] = 950;

        for (i = 0; i < QED_MAX_RESC; i++)
                resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

        qed_hw_set_feat(p_hwfn);

        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "The numbers for each resource are:\n"
                   "SB = %d start = %d\n"
                   "L2_QUEUE = %d start = %d\n"
                   "VPORT = %d start = %d\n"
                   "PQ = %d start = %d\n"
                   "RL = %d start = %d\n"
                   "MAC = %d start = %d\n"
                   "VLAN = %d start = %d\n"
                   "ILT = %d start = %d\n",
                   p_hwfn->hw_info.resc_num[QED_SB],
                   p_hwfn->hw_info.resc_start[QED_SB],
                   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_num[QED_VPORT],
                   p_hwfn->hw_info.resc_start[QED_VPORT],
                   p_hwfn->hw_info.resc_num[QED_PQ],
                   p_hwfn->hw_info.resc_start[QED_PQ],
                   p_hwfn->hw_info.resc_num[QED_RL],
                   p_hwfn->hw_info.resc_start[QED_RL],
                   p_hwfn->hw_info.resc_num[QED_MAC],
                   p_hwfn->hw_info.resc_start[QED_MAC],
                   p_hwfn->hw_info.resc_num[QED_VLAN],
                   p_hwfn->hw_info.resc_start[QED_VLAN],
                   p_hwfn->hw_info.resc_num[QED_ILT],
                   p_hwfn->hw_info.resc_start[QED_ILT]);
}
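/* nvm_cfg parsing: MISC_REG_GEN_PURP_CR0 points at the shared-memory
 * nvm_cfg structure, from which the port mode, default link settings,
 * multi-function mode and device capabilities are read.
 */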
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
        struct qed_mcp_link_params *link;

        /* Read global nvm_cfg address */
        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

        /* Verify MCP has initialized it */
        if (!nvm_cfg_addr) {
                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
                return -EINVAL;
        }

        /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, core_cfg);

        core_cfg = qed_rd(p_hwfn, p_ptt, addr);

        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
                          core_cfg);
                break;
        }

        /* Read default link configuration */
        link = &p_hwfn->mcp_info->link_input;
        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
        link->speed.advertised_speeds =
                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
                link->speed.advertised_speeds;

        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, link_settings));
        switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
        case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
                link->speed.autoneg = true;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
                link->speed.forced_speed = 1000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
                link->speed.forced_speed = 40000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
                link->speed.forced_speed = 50000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
                link->speed.forced_speed = 100000;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
                          link_temp);
        }

        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
        link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
        link->pause.autoneg = !!(link_temp &
                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
        link->pause.forced_rx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
        link->pause.forced_tx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
                   link->speed.forced_speed, link->speed.advertised_speeds,
                   link->speed.autoneg, link->pause.autoneg);

        /* Read Multi-function information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, generic_cont0);

        generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

        mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                  NVM_CFG1_GLOB_MF_MODE_OFFSET;

        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
                p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->cdev->mf_mode = QED_MF_NPAR;
                break;
        case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
                p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
                break;
        }
        DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                p_hwfn->cdev->mf_mode);

        /* Read device capabilities information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, device_capabilities);

        device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
                __set_bit(QED_DEV_CAP_ETH,
                          &p_hwfn->hw_info.device_capabilities);

        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                enum qed_pci_personality personality)
{
        u32 port_mode;
        int rc;

        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);

        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
        } else if (port_mode <= 5) {
                p_hwfn->cdev->num_ports_in_engines = 2;
        } else {
                DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);

                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }

        qed_hw_get_nvm_info(p_hwfn, p_ptt);

        rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (qed_mcp_is_init(p_hwfn))
                ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
                                p_hwfn->mcp_info->func_info.mac);
        else
                eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

        if (qed_mcp_is_init(p_hwfn)) {
                if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
                        p_hwfn->hw_info.ovlan =
                                p_hwfn->mcp_info->func_info.ovlan;

                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }

        if (qed_mcp_is_init(p_hwfn)) {
                enum qed_pci_personality protocol;

                protocol = p_hwfn->mcp_info->func_info.protocol;
                p_hwfn->hw_info.personality = protocol;
        }

        qed_hw_get_resc(p_hwfn);

        return rc;
}
static int qed_get_dev_info(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        u32 tmp;

        /* Read Vendor Id / Device Id */
        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
                             &cdev->vendor_id);
        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
                             &cdev->device_id);
        cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);

        cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
        tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);

        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                cdev->num_hwfns = 2;
        } else {
                cdev->num_hwfns = 1;
        }

        cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
        cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                       MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);

        DP_INFO(cdev->hwfns,
                "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);

        if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
                DP_NOTICE(cdev->hwfns,
                          "The chip type/rev (BB A0) is not supported!\n");
                return -EINVAL;
        }

        return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
                                 enum qed_pci_personality personality)
{
        int rc = 0;

        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;

        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
                       "Reading the ME register returns all Fs; Preventing further chip access\n");
                return -EINVAL;
        }

        get_function_id(p_hwfn);

        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
                goto err0;
        }

        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id) {
                rc = qed_get_dev_info(p_hwfn->cdev);
                if (rc != 0)
                        goto err1;
        }

        qed_hw_hwfn_prepare(p_hwfn);

        /* Initialize MCP structure */
        rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
                goto err1;
        }

        /* Read the device configuration information from the HW and SHMEM */
        rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to get HW information\n");
                goto err2;
        }

        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
                goto err2;
        }

        return rc;
err2:
        qed_mcp_free(p_hwfn);
err1:
        qed_hw_hwfn_free(p_hwfn);
err0:
        return rc;
}
int qed_hw_prepare(struct qed_dev *cdev,
                   int personality)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        int rc;

        /* Store the precompiled init data ptrs */
        qed_init_iro_array(cdev);

        /* Initialize the first hwfn - will learn number of hwfns */
        rc = qed_hw_prepare_single(p_hwfn,
                                   cdev->regview,
                                   cdev->doorbells, personality);
        if (rc)
                return rc;

        personality = p_hwfn->hw_info.personality;

        /* Initialize the rest of the hwfns */
        if (cdev->num_hwfns > 1) {
                void __iomem *p_regview, *p_doorbell;
                u8 __iomem *addr;

                /* adjust bar offset for second engine */
                addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
                p_regview = addr;

                /* adjust doorbell bar offset for second engine */
                addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
                p_doorbell = addr;

                /* prepare second hw function */
                rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
                                           p_doorbell, personality);

                /* in case of error, need to free the previously
                 * initialized hwfn 0.
                 */
                if (rc) {
                        qed_init_free(p_hwfn);
                        qed_mcp_free(p_hwfn);
                        qed_hw_hwfn_free(p_hwfn);
                }
        }

        return rc;
}
void qed_hw_remove(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_init_free(p_hwfn);
                qed_hw_hwfn_free(p_hwfn);
                qed_mcp_free(p_hwfn);
        }
}
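/* Chain memory is allocated in QED_CHAIN_PAGE_SIZE chunks via
 * dma_alloc_coherent(); PBL chains additionally get a page-base list with
 * one QED_CHAIN_PBL_ENTRY_SIZE entry per page.
 */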
int qed_chain_alloc(struct qed_dev *cdev,
                    enum qed_chain_use_mode intended_use,
                    enum qed_chain_mode mode,
                    u16 num_elems,
                    size_t elem_size,
                    struct qed_chain *p_chain)
{
        dma_addr_t p_pbl_phys = 0;
        void *p_pbl_virt = NULL;
        dma_addr_t p_phys = 0;
        void *p_virt = NULL;
        u16 page_cnt = 0;
        size_t size;

        if (mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

        size = page_cnt * QED_CHAIN_PAGE_SIZE;
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    size, &p_phys, GFP_KERNEL);
        if (!p_virt) {
                DP_NOTICE(cdev, "Failed to allocate chain mem\n");
                goto nomem;
        }

        if (mode == QED_CHAIN_MODE_PBL) {
                size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                                size, &p_pbl_phys,
                                                GFP_KERNEL);
                if (!p_pbl_virt) {
                        DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
                        goto nomem;
                }

                qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
                                   (u8)elem_size, intended_use,
                                   p_pbl_phys, p_pbl_virt);
        } else {
                qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
                               (u8)elem_size, intended_use, mode);
        }

        return 0;

nomem:
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PAGE_SIZE,
                          p_virt, p_phys);
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
                          p_pbl_virt, p_pbl_phys);

        return -ENOMEM;
}
void qed_chain_free(struct qed_dev *cdev,
                    struct qed_chain *p_chain)
{
        size_t size;

        if (!p_chain->p_virt_addr)
                return;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                dma_free_coherent(&cdev->pdev->dev, size,
                                  p_chain->pbl.p_virt_table,
                                  p_chain->pbl.p_phys_table);
        }

        size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
        dma_free_coherent(&cdev->pdev->dev, size,
                          p_chain->p_virt_addr,
                          p_chain->p_phys_addr);
}
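/* The qed_fw_* helpers translate a PF-relative resource index into the
 * absolute index used by firmware by adding the PF's RESC_START offset,
 * validating the input against RESC_NUM first.
 */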
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
                    u16 src_id, u16 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
                u16 min, max;

                min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
                max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
                DP_NOTICE(p_hwfn,
                          "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

        return 0;
}
int qed_fw_vport(struct qed_hwfn *p_hwfn,
                 u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_VPORT);
                max = min + RESC_NUM(p_hwfn, QED_VPORT);
                DP_NOTICE(p_hwfn,
                          "vport id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

        return 0;
}
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
                   u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
                max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
                DP_NOTICE(p_hwfn,
                          "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

        return 0;
}