/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed_dev_api.h"
#include "qed_init_ops.h"
#include "qed_iscsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

static DEFINE_SPINLOCK(qm_lock);

#define QED_MIN_DPIS		(4)
#define QED_MIN_PWM_REGION	(QED_WID_SIZE * QED_MIN_DPIS)

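/* QED_MIN_PWM_REGION is the sanity floor used by qed_hw_init_pf_doorbell_bar():
 * the PWM (write-combined doorbell) portion of the doorbell BAR must be able
 * to hold at least QED_MIN_DPIS doorbell windows of QED_WID_SIZE bytes each.
 */
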
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_bar_size(p_hwfn, bar_id);

	val = qed_rd(p_hwfn, p_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW initialized above registered only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}

void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_free(&cdev->hwfns[i]);
		return;
	}

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);
	cdev->reset_stats = NULL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_consq_free(p_hwfn);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_free(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn))
			qed_rdma_info_free(p_hwfn);

		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn);
	}
}

/******************** QM initialization *******************/
#define ACTIVE_TCS_BMAP 0x9f
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf

/* determines the physical queue flags for a given PF. */
static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
	u32 flags = 0;

	flags |= PQ_FLAGS_LB;

	if (IS_QED_SRIOV(p_hwfn->cdev))
		flags |= PQ_FLAGS_VFS;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		flags |= PQ_FLAGS_MCOS;
		break;
	case QED_PCI_FCOE:
		flags |= PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ISCSI:
		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ETH_ROCE:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
		if (IS_QED_MULTI_TC_ROCE(p_hwfn))
			flags |= PQ_FLAGS_MTC;
		break;
	case QED_PCI_ETH_IWARP:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
			 PQ_FLAGS_OFLD;
		break;
	default:
		DP_ERR(p_hwfn,
		       "unknown personality %d\n", p_hwfn->hw_info.personality);
		return 0;
	}

	return flags;
}

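/*
 * Example: for a RoCE PF with SR-IOV enabled and multi-TC RoCE supported, the
 * returned mask is PQ_FLAGS_LB | PQ_FLAGS_VFS | PQ_FLAGS_MCOS | PQ_FLAGS_OFLD |
 * PQ_FLAGS_LLT | PQ_FLAGS_MTC.
 */
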
/* Getters for resource amounts necessary for qm initialization */
static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}

static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
	return IS_QED_SRIOV(p_hwfn->cdev) ?
	       p_hwfn->cdev->p_iov_info->total_vfs : 0;
}

static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	if (!(PQ_FLAGS_MTC & pq_flags))
		return 1;

	return qed_init_qm_get_num_tcs(p_hwfn);
}

#define NUM_DEFAULT_RLS 1

static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
	u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	/* num RLs can't exceed resource amount of rls or vports */
	num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
				 RESC_NUM(p_hwfn, QED_VPORT));

	/* Make sure after we reserve there's something left */
	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
		return 0;

	/* subtract rls necessary for VFs and one default one for the PF */
	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;

	return num_pf_rls;
}

static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	/* all pqs share the same vport, except for vfs and pf_rl pqs */
	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) *
	       qed_init_qm_get_num_vfs(p_hwfn) + 1;
}

/* calc amount of PQs according to the requested flags */
static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
	       qed_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}

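/*
 * Worked example: an L2-only PF (PQ_FLAGS_LB | PQ_FLAGS_MCOS) with four
 * hardware TCs and no VFs gets 4 MCOS PQs + 1 pure-loopback PQ = 5 PQs,
 * all sharing a single vport.
 */
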
/* initialize the top level QM params */
static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool four_port;

	/* pq and vport bases for this PF */
	qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	/* rate limiting and weighted fair queueing are always enabled */
	qm_info->vport_rl_en = true;
	qm_info->vport_wfq_en = true;

	/* TC config is different for AH 4 port */
	four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;

	/* in AH 4 port we have fewer TCs per port */
	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
						     NUM_OF_PHYS_TCS;

	/* unless MFW indicated otherwise, ooo_tc == 3 for
	 * AH 4-port and 4 otherwise.
	 */
	if (!qm_info->ooo_tc)
		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
					      DCBX_TCP_OOO_TC;
}

/* initialize qm vport params */
static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 i;

	/* all vports participate in weighted fair queueing */
	for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;
}

/* initialize qm port params */
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
	/* Initialize qm port parameters */
	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;

	/* indicate how ooo and high pri traffic is dealt with */
	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
			  ACTIVE_TCS_BMAP_4PORT_K2 :
			  ACTIVE_TCS_BMAP;

	for (i = 0; i < num_ports; i++) {
		struct init_qm_port_params *p_qm_port =
		    &p_hwfn->qm_info.qm_port_params[i];

		p_qm_port->active = 1;
		p_qm_port->active_phys_tcs = active_phys_tcs;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}
}

/* Reset the params which must be reset for qm init. QM init may be called as
 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 * params may be affected by the init but would simply recalculate to the same
 * values. The allocations made for QM init, ports, vports, pqs and vfqs are
 * not affected as these amounts stay the same.
 */
static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_pqs = 0;
	qm_info->num_vports = 0;
	qm_info->num_pf_rls = 0;
	qm_info->num_vf_pqs = 0;
	qm_info->first_vf_pq = 0;
	qm_info->first_mcos_pq = 0;
	qm_info->first_rl_pq = 0;
}

static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_vports++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
}

/* initialize a single pq and manage qm_info resources accounting.
 * The pq_init_flags param determines whether the PQ is rate limited
 * (for VF or PF) and whether a new vport is allocated to the pq or not
 * (i.e. vport will be shared).
 */

/* flags for pq init */
#define PQ_INIT_SHARE_VPORT	(1 << 0)
#define PQ_INIT_PF_RL		(1 << 1)
#define PQ_INIT_VF_RL		(1 << 2)

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP	1
#define PQ_INIT_DEFAULT_TC		0

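/*
 * PQ_INIT_SHARE_VPORT makes a new PQ reuse the currently open vport; omitting
 * it lets qed_init_qm_pq() account a fresh vport for that PQ.
 * PQ_INIT_PF_RL / PQ_INIT_VF_RL additionally mark the PQ as rate limited for
 * the PF or for a VF respectively.
 */
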
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
{
	p_info->offload_tc = tc;
	p_info->offload_tc_set = true;
}

static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.offload_tc_set;
}

static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
{
	if (qed_is_offload_tc_set(p_hwfn))
		return p_hwfn->hw_info.offload_tc;

	return PQ_INIT_DEFAULT_TC;
}

static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
			   struct qed_qm_info *qm_info,
			   u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn,
		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
	    qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	qm_info->qm_pq_params[pq_idx].rl_valid =
	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn,
		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
}

/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
					   unsigned long pq_flags)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (bitmap_weight(&pq_flags,
			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
		goto err;
	}

	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
		goto err;
	}

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->first_ofld_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->first_llt_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	return &qm_info->start_pq;
}

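/*
 * Note: on an invalid flag the helper above falls back to &qm_info->start_pq,
 * so callers such as qed_get_cm_pq_idx() end up pointing at the PF's first PQ
 * rather than dereferencing an invalid pointer.
 */
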
/* save pq index in qm info */
static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
				u32 pq_flags, u16 pq_val)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}

/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	return *base_pq_idx + CM_TX_PQ_BASE;
}

u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);

	if (max_tc == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
		       PQ_FLAGS_MCOS);
		return p_hwfn->qm_info.start_pq;
	}

	if (tc > max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}

u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);

	if (max_vf == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
		       PQ_FLAGS_VFS);
		return p_hwfn->qm_info.start_pq;
	}

	if (vf > max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}

u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_ofld_pq, pq_offset;

	first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_ofld_pq + pq_offset;
}

u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_llt_pq, pq_offset;

	first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_llt_pq + pq_offset;
}

/* Functions for creating specific types of pqs */
static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
		       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
{
	u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc;

	/* override pq's TC if offload TC is set */
	for (tc = 0; tc < num_tcs; tc++)
		qed_init_qm_pq(p_hwfn, qm_info,
			       qed_is_offload_tc_set(p_hwfn) ?
			       p_hwfn->hw_info.offload_tc : tc,
			       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc_idx;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
	for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
	qm_info->num_vf_pqs = num_vfs;
	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
		qed_init_qm_pq(p_hwfn,
			       qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
}

static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
{
	u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
			       PQ_INIT_PF_RL);
}

static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	qed_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	qed_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	qed_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	qed_init_qm_ooo_pq(p_hwfn);

	qed_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	qed_init_qm_offload_pq(p_hwfn);

	qed_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports */
	qed_init_qm_advance_vport(p_hwfn);

	qed_init_qm_vf_pqs(p_hwfn);
}

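/*
 * Ordering note: beyond the FW requirement that rate-limited PQs come first,
 * the MCOS, loopback, OOO, ACK, offload and low-latency PQs are all created
 * with PQ_INIT_SHARE_VPORT and therefore share one vport;
 * qed_init_qm_advance_vport() then accounts for that shared vport before the
 * VF PQs are added, each of which gets a vport and a VF rate limiter of its
 * own.
 */
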
/* compare values of getters against resources amounts */
static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
{
	if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
		return -EINVAL;
	}

	if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
		return 0;

	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		p_hwfn->hw_info.multi_tc_roce_en = 0;
		DP_NOTICE(p_hwfn,
			  "multi-tc roce was disabled to reduce requested amount of pqs\n");
		if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
			return 0;
	}

	DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
	return -EINVAL;
}

static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq,
		   qm_info->start_vport,
		   qm_info->pure_lb_pq,
		   qm_info->first_ofld_pq,
		   qm_info->first_llt_pq,
		   qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq,
		   qm_info->first_vf_pq,
		   qm_info->num_pqs,
		   qm_info->num_vf_pqs,
		   qm_info->num_vports, qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en,
		   qm_info->pf_wfq_en,
		   qm_info->vport_rl_en,
		   qm_info->vport_wfq_en,
		   qm_info->pf_wfq,
		   qm_info->pf_rl,
		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));

	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i,
			   port->active,
			   port->active_phys_tcs,
			   port->num_pbf_cmd_lines,
			   port->num_btb_blocks, port->reserved);
	}

	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i,
			   vport->vport_rl, vport->vport_wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_HW,
				   "%d ", vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
	}

	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
			   qm_info->start_pq + i,
			   pq->port_id,
			   pq->vport_id,
			   pq->tc_id, pq->wrr_group, pq->rl_valid);
	}
}

static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	/* reset params required for init run */
	qed_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	qed_init_qm_params(p_hwfn);

	/* init QM port params */
	qed_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	qed_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	qed_init_qm_pq_params(p_hwfn);

	/* display all that init */
	qed_dp_init_qm_params(p_hwfn);
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* initialize qed's qm data structure */
	qed_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn, p_ptt, false);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

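/*
 * qm_lock (the global spinlock defined at the top of this file) serializes the
 * QM stop/start commands above; it is taken with bottom halves disabled since
 * QM reconfiguration can be triggered outside of the initial load flow
 * (e.g. DCBx renegotiation).
 */
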
static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	int rc;

	rc = qed_init_qm_sanity(p_hwfn);
	if (rc)
		goto alloc_err;

	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
					sizeof(*qm_info->qm_pq_params),
					GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
					   sizeof(*qm_info->qm_vport_params),
					   GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
					  sizeof(*qm_info->qm_port_params),
					  GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
				    sizeof(*qm_info->wfq_data),
				    GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}

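/*
 * All four arrays above are sized from the same getters that
 * qed_init_qm_info() later uses to fill them, so a QM re-init (which keeps
 * those amounts constant) never needs to reallocate them.
 */
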
int qed_resc_alloc(struct qed_dev *cdev)
{
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i, rc = 0;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i) {
			rc = qed_l2_alloc(&cdev->hwfns[i]);
			if (rc)
				return rc;
		}
		return rc;
	}

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = qed_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		qed_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks =
			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn,
				       "failed ILT compute. Requested too many lines: %u\n",
				       line_count);
				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The tables sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			enum protocol_type rdma_proto;

			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
				rdma_proto = PROTOCOLID_ROCE;
			else
				rdma_proto = PROTOCOLID_IWARP;

			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_ISCSI,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
		if (rc)
			goto alloc_err;

		rc = qed_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_l2_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			rc = qed_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			rc = qed_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			rc = qed_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			rc = qed_rdma_info_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}

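/*
 * Any failure above funnels through alloc_err (or alloc_no_mem for the EQ
 * sizing check), which tears down every per-hwfn allocation made so far via
 * qed_resc_free() before propagating the error.
 */
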
1130 void qed_resc_setup(struct qed_dev
*cdev
)
1135 for_each_hwfn(cdev
, i
)
1136 qed_l2_setup(&cdev
->hwfns
[i
]);
1140 for_each_hwfn(cdev
, i
) {
1141 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[i
];
1143 qed_cxt_mngr_setup(p_hwfn
);
1144 qed_spq_setup(p_hwfn
);
1145 qed_eq_setup(p_hwfn
);
1146 qed_consq_setup(p_hwfn
);
1148 /* Read shadow of current MFW mailbox */
1149 qed_mcp_read_mb(p_hwfn
, p_hwfn
->p_main_ptt
);
1150 memcpy(p_hwfn
->mcp_info
->mfw_mb_shadow
,
1151 p_hwfn
->mcp_info
->mfw_mb_cur
,
1152 p_hwfn
->mcp_info
->mfw_mb_length
);
1154 qed_int_setup(p_hwfn
, p_hwfn
->p_main_ptt
);
1156 qed_l2_setup(p_hwfn
);
1157 qed_iov_setup(p_hwfn
);
1158 #ifdef CONFIG_QED_LL2
1159 if (p_hwfn
->using_ll2
)
1160 qed_ll2_setup(p_hwfn
);
1162 if (p_hwfn
->hw_info
.personality
== QED_PCI_FCOE
)
1163 qed_fcoe_setup(p_hwfn
);
1165 if (p_hwfn
->hw_info
.personality
== QED_PCI_ISCSI
) {
1166 qed_iscsi_setup(p_hwfn
);
1167 qed_ooo_setup(p_hwfn
);
1172 #define FINAL_CLEANUP_POLL_CNT (100)
1173 #define FINAL_CLEANUP_POLL_TIME (10)
1174 int qed_final_cleanup(struct qed_hwfn
*p_hwfn
,
1175 struct qed_ptt
*p_ptt
, u16 id
, bool is_vf
)
1177 u32 command
= 0, addr
, count
= FINAL_CLEANUP_POLL_CNT
;
1180 addr
= GTT_BAR0_MAP_REG_USDM_RAM
+
1181 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn
->rel_pf_id
);
1186 command
|= X_FINAL_CLEANUP_AGG_INT
<<
1187 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT
;
1188 command
|= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT
;
1189 command
|= id
<< SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT
;
1190 command
|= SDM_COMP_TYPE_AGG_INT
<< SDM_OP_GEN_COMP_TYPE_SHIFT
;
1192 /* Make sure notification is not set before initiating final cleanup */
1193 if (REG_RD(p_hwfn
, addr
)) {
1195 "Unexpected; Found final cleanup notification before initiating final cleanup\n");
1196 REG_WR(p_hwfn
, addr
, 0);
1199 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
1200 "Sending final cleanup for PFVF[%d] [Command %08x]\n",
1203 qed_wr(p_hwfn
, p_ptt
, XSDM_REG_OPERATION_GEN
, command
);
1205 /* Poll until completion */
1206 while (!REG_RD(p_hwfn
, addr
) && count
--)
1207 msleep(FINAL_CLEANUP_POLL_TIME
);
1209 if (REG_RD(p_hwfn
, addr
))
1213 "Failed to receive FW final cleanup notification\n");
1215 /* Cleanup afterwards */
1216 REG_WR(p_hwfn
, addr
, 0);
1221 static int qed_calc_hw_mode(struct qed_hwfn
*p_hwfn
)
1225 if (QED_IS_BB_B0(p_hwfn
->cdev
)) {
1226 hw_mode
|= 1 << MODE_BB
;
1227 } else if (QED_IS_AH(p_hwfn
->cdev
)) {
1228 hw_mode
|= 1 << MODE_K2
;
1230 DP_NOTICE(p_hwfn
, "Unknown chip type %#x\n",
1231 p_hwfn
->cdev
->type
);
1235 switch (p_hwfn
->cdev
->num_ports_in_engine
) {
1237 hw_mode
|= 1 << MODE_PORTS_PER_ENG_1
;
1240 hw_mode
|= 1 << MODE_PORTS_PER_ENG_2
;
1243 hw_mode
|= 1 << MODE_PORTS_PER_ENG_4
;
1246 DP_NOTICE(p_hwfn
, "num_ports_in_engine = %d not supported\n",
1247 p_hwfn
->cdev
->num_ports_in_engine
);
1251 if (test_bit(QED_MF_OVLAN_CLSS
, &p_hwfn
->cdev
->mf_bits
))
1252 hw_mode
|= 1 << MODE_MF_SD
;
1254 hw_mode
|= 1 << MODE_MF_SI
;
1256 hw_mode
|= 1 << MODE_ASIC
;
1258 if (p_hwfn
->cdev
->num_hwfns
> 1)
1259 hw_mode
|= 1 << MODE_100G
;
1261 p_hwfn
->hw_info
.hw_mode
= hw_mode
;
1263 DP_VERBOSE(p_hwfn
, (NETIF_MSG_PROBE
| NETIF_MSG_IFUP
),
1264 "Configuring function for hw_mode: 0x%08x\n",
1265 p_hwfn
->hw_info
.hw_mode
);
1270 /* Init run time data for all PFs on an engine. */
1271 static void qed_init_cau_rt_data(struct qed_dev
*cdev
)
1273 u32 offset
= CAU_REG_SB_VAR_MEMORY_RT_OFFSET
;
1276 for_each_hwfn(cdev
, i
) {
1277 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[i
];
1278 struct qed_igu_info
*p_igu_info
;
1279 struct qed_igu_block
*p_block
;
1280 struct cau_sb_entry sb_entry
;
1282 p_igu_info
= p_hwfn
->hw_info
.p_igu_info
;
1285 igu_sb_id
< QED_MAPPING_MEMORY_SIZE(cdev
); igu_sb_id
++) {
1286 p_block
= &p_igu_info
->entry
[igu_sb_id
];
1288 if (!p_block
->is_pf
)
1291 qed_init_cau_sb_entry(p_hwfn
, &sb_entry
,
1292 p_block
->function_id
, 0, 0);
1293 STORE_RT_REG_AGG(p_hwfn
, offset
+ igu_sb_id
* 2,
1299 static void qed_init_cache_line_size(struct qed_hwfn
*p_hwfn
,
1300 struct qed_ptt
*p_ptt
)
1302 u32 val
, wr_mbs
, cache_line_size
;
1304 val
= qed_rd(p_hwfn
, p_ptt
, PSWRQ2_REG_WR_MBS0
);
1317 "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
1322 cache_line_size
= min_t(u32
, L1_CACHE_BYTES
, wr_mbs
);
1323 switch (cache_line_size
) {
1338 "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
1342 if (L1_CACHE_BYTES
> wr_mbs
)
1344 "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
1345 L1_CACHE_BYTES
, wr_mbs
);
1347 STORE_RT_REG(p_hwfn
, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET
, val
);
1349 STORE_RT_REG(p_hwfn
, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET
, val
);
1350 STORE_RT_REG(p_hwfn
, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET
, val
);
1354 static int qed_hw_init_common(struct qed_hwfn
*p_hwfn
,
1355 struct qed_ptt
*p_ptt
, int hw_mode
)
1357 struct qed_qm_info
*qm_info
= &p_hwfn
->qm_info
;
1358 struct qed_qm_common_rt_init_params params
;
1359 struct qed_dev
*cdev
= p_hwfn
->cdev
;
1360 u8 vf_id
, max_num_vfs
;
1365 qed_init_cau_rt_data(cdev
);
1367 /* Program GTT windows */
1368 qed_gtt_init(p_hwfn
);
1370 if (p_hwfn
->mcp_info
) {
1371 if (p_hwfn
->mcp_info
->func_info
.bandwidth_max
)
1372 qm_info
->pf_rl_en
= true;
1373 if (p_hwfn
->mcp_info
->func_info
.bandwidth_min
)
1374 qm_info
->pf_wfq_en
= true;
1377 memset(¶ms
, 0, sizeof(params
));
1378 params
.max_ports_per_engine
= p_hwfn
->cdev
->num_ports_in_engine
;
1379 params
.max_phys_tcs_per_port
= qm_info
->max_phys_tcs_per_port
;
1380 params
.pf_rl_en
= qm_info
->pf_rl_en
;
1381 params
.pf_wfq_en
= qm_info
->pf_wfq_en
;
1382 params
.vport_rl_en
= qm_info
->vport_rl_en
;
1383 params
.vport_wfq_en
= qm_info
->vport_wfq_en
;
1384 params
.port_params
= qm_info
->qm_port_params
;
1386 qed_qm_common_rt_init(p_hwfn
, ¶ms
);
1388 qed_cxt_hw_init_common(p_hwfn
);
1390 qed_init_cache_line_size(p_hwfn
, p_ptt
);
1392 rc
= qed_init_run(p_hwfn
, p_ptt
, PHASE_ENGINE
, ANY_PHASE_ID
, hw_mode
);
1396 qed_wr(p_hwfn
, p_ptt
, PSWRQ2_REG_L2P_VALIDATE_VFID
, 0);
1397 qed_wr(p_hwfn
, p_ptt
, PGLUE_B_REG_USE_CLIENTID_IN_TAG
, 1);
1399 if (QED_IS_BB(p_hwfn
->cdev
)) {
1400 num_pfs
= NUM_OF_ENG_PFS(p_hwfn
->cdev
);
1401 for (pf_id
= 0; pf_id
< num_pfs
; pf_id
++) {
1402 qed_fid_pretend(p_hwfn
, p_ptt
, pf_id
);
1403 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_ROCE
, 0x0);
1404 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_TCP
, 0x0);
1406 /* pretend to original PF */
1407 qed_fid_pretend(p_hwfn
, p_ptt
, p_hwfn
->rel_pf_id
);
1410 max_num_vfs
= QED_IS_AH(cdev
) ? MAX_NUM_VFS_K2
: MAX_NUM_VFS_BB
;
1411 for (vf_id
= 0; vf_id
< max_num_vfs
; vf_id
++) {
1412 concrete_fid
= qed_vfid_to_concrete(p_hwfn
, vf_id
);
1413 qed_fid_pretend(p_hwfn
, p_ptt
, (u16
) concrete_fid
);
1414 qed_wr(p_hwfn
, p_ptt
, CCFC_REG_STRONG_ENABLE_VF
, 0x1);
1415 qed_wr(p_hwfn
, p_ptt
, CCFC_REG_WEAK_ENABLE_VF
, 0x0);
1416 qed_wr(p_hwfn
, p_ptt
, TCFC_REG_STRONG_ENABLE_VF
, 0x1);
1417 qed_wr(p_hwfn
, p_ptt
, TCFC_REG_WEAK_ENABLE_VF
, 0x0);
1419 /* pretend to original PF */
1420 qed_fid_pretend(p_hwfn
, p_ptt
, p_hwfn
->rel_pf_id
);
1426 qed_hw_init_dpi_size(struct qed_hwfn
*p_hwfn
,
1427 struct qed_ptt
*p_ptt
, u32 pwm_region_size
, u32 n_cpus
)
1429 u32 dpi_bit_shift
, dpi_count
, dpi_page_size
;
1433 /* Calculate DPI size */
1434 n_wids
= max_t(u32
, QED_MIN_WIDS
, n_cpus
);
1435 dpi_page_size
= QED_WID_SIZE
* roundup_pow_of_two(n_wids
);
1436 dpi_page_size
= (dpi_page_size
+ PAGE_SIZE
- 1) & ~(PAGE_SIZE
- 1);
1437 dpi_bit_shift
= ilog2(dpi_page_size
/ 4096);
1438 dpi_count
= pwm_region_size
/ dpi_page_size
;
1440 min_dpis
= p_hwfn
->pf_params
.rdma_pf_params
.min_dpis
;
1441 min_dpis
= max_t(u32
, QED_MIN_DPIS
, min_dpis
);
1443 p_hwfn
->dpi_size
= dpi_page_size
;
1444 p_hwfn
->dpi_count
= dpi_count
;
1446 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_DPI_BIT_SHIFT
, dpi_bit_shift
);
1448 if (dpi_count
< min_dpis
)
1454 enum QED_ROCE_EDPM_MODE
{
1455 QED_ROCE_EDPM_MODE_ENABLE
= 0,
1456 QED_ROCE_EDPM_MODE_FORCE_ON
= 1,
1457 QED_ROCE_EDPM_MODE_DISABLE
= 2,
1461 qed_hw_init_pf_doorbell_bar(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1463 u32 pwm_regsize
, norm_regsize
;
1464 u32 non_pwm_conn
, min_addr_reg1
;
1465 u32 db_bar_size
, n_cpus
= 1;
1471 db_bar_size
= qed_hw_bar_size(p_hwfn
, p_ptt
, BAR_ID_1
);
1472 if (p_hwfn
->cdev
->num_hwfns
> 1)
1475 /* Calculate doorbell regions */
1476 non_pwm_conn
= qed_cxt_get_proto_cid_start(p_hwfn
, PROTOCOLID_CORE
) +
1477 qed_cxt_get_proto_cid_count(p_hwfn
, PROTOCOLID_CORE
,
1479 qed_cxt_get_proto_cid_count(p_hwfn
, PROTOCOLID_ETH
,
1481 norm_regsize
= roundup(QED_PF_DEMS_SIZE
* non_pwm_conn
, PAGE_SIZE
);
1482 min_addr_reg1
= norm_regsize
/ 4096;
1483 pwm_regsize
= db_bar_size
- norm_regsize
;
1485 /* Check that the normal and PWM sizes are valid */
1486 if (db_bar_size
< norm_regsize
) {
1487 DP_ERR(p_hwfn
->cdev
,
1488 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
1489 db_bar_size
, norm_regsize
);
1493 if (pwm_regsize
< QED_MIN_PWM_REGION
) {
1494 DP_ERR(p_hwfn
->cdev
,
1495 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
1497 QED_MIN_PWM_REGION
, db_bar_size
, norm_regsize
);
1501 /* Calculate number of DPIs */
1502 roce_edpm_mode
= p_hwfn
->pf_params
.rdma_pf_params
.roce_edpm_mode
;
1503 if ((roce_edpm_mode
== QED_ROCE_EDPM_MODE_ENABLE
) ||
1504 ((roce_edpm_mode
== QED_ROCE_EDPM_MODE_FORCE_ON
))) {
1505 /* Either EDPM is mandatory, or we are attempting to allocate a
1508 n_cpus
= num_present_cpus();
1509 rc
= qed_hw_init_dpi_size(p_hwfn
, p_ptt
, pwm_regsize
, n_cpus
);
1512 cond
= (rc
&& (roce_edpm_mode
== QED_ROCE_EDPM_MODE_ENABLE
)) ||
1513 (roce_edpm_mode
== QED_ROCE_EDPM_MODE_DISABLE
);
1514 if (cond
|| p_hwfn
->dcbx_no_edpm
) {
1515 /* Either EDPM is disabled from user configuration, or it is
1516 * disabled via DCBx, or it is not mandatory and we failed to
1517 * allocated a WID per CPU.
1520 rc
= qed_hw_init_dpi_size(p_hwfn
, p_ptt
, pwm_regsize
, n_cpus
);
1523 qed_rdma_dpm_bar(p_hwfn
, p_ptt
);
1526 p_hwfn
->wid_count
= (u16
) n_cpus
;
1529 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
1534 ((p_hwfn
->dcbx_no_edpm
) || (p_hwfn
->db_bar_no_edpm
)) ?
1535 "disabled" : "enabled");
1539 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
1541 p_hwfn
->pf_params
.rdma_pf_params
.min_dpis
);
1545 p_hwfn
->dpi_start_offset
= norm_regsize
;
1547 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
1548 pf_dems_shift
= ilog2(QED_PF_DEMS_SIZE
/ 4);
1549 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_ICID_BIT_SHIFT_NORM
, pf_dems_shift
);
1550 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_MIN_ADDR_REG1
, min_addr_reg1
);
1555 static int qed_hw_init_port(struct qed_hwfn
*p_hwfn
,
1556 struct qed_ptt
*p_ptt
, int hw_mode
)
1560 rc
= qed_init_run(p_hwfn
, p_ptt
, PHASE_PORT
, p_hwfn
->port_id
, hw_mode
);
1564 qed_wr(p_hwfn
, p_ptt
, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE
, 0);
1569 static int qed_hw_init_pf(struct qed_hwfn
*p_hwfn
,
1570 struct qed_ptt
*p_ptt
,
1571 struct qed_tunnel_info
*p_tunn
,
1574 enum qed_int_mode int_mode
,
1575 bool allow_npar_tx_switch
)
1577 u8 rel_pf_id
= p_hwfn
->rel_pf_id
;
1580 if (p_hwfn
->mcp_info
) {
1581 struct qed_mcp_function_info
*p_info
;
1583 p_info
= &p_hwfn
->mcp_info
->func_info
;
1584 if (p_info
->bandwidth_min
)
1585 p_hwfn
->qm_info
.pf_wfq
= p_info
->bandwidth_min
;
1587 /* Update rate limit once we'll actually have a link */
1588 p_hwfn
->qm_info
.pf_rl
= 100000;
1591 qed_cxt_hw_init_pf(p_hwfn
, p_ptt
);
1593 qed_int_igu_init_rt(p_hwfn
);
1595 /* Set VLAN in NIG if needed */
1596 if (hw_mode
& BIT(MODE_MF_SD
)) {
1597 DP_VERBOSE(p_hwfn
, NETIF_MSG_HW
, "Configuring LLH_FUNC_TAG\n");
1598 STORE_RT_REG(p_hwfn
, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET
, 1);
1599 STORE_RT_REG(p_hwfn
, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET
,
1600 p_hwfn
->hw_info
.ovlan
);
1602 DP_VERBOSE(p_hwfn
, NETIF_MSG_HW
,
1603 "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
1604 STORE_RT_REG(p_hwfn
, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET
,
1608 /* Enable classification by MAC if needed */
1609 if (hw_mode
& BIT(MODE_MF_SI
)) {
1610 DP_VERBOSE(p_hwfn
, NETIF_MSG_HW
,
1611 "Configuring TAGMAC_CLS_TYPE\n");
1612 STORE_RT_REG(p_hwfn
,
1613 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET
, 1);
1616 /* Protocol Configuration */
1617 STORE_RT_REG(p_hwfn
, PRS_REG_SEARCH_TCP_RT_OFFSET
,
1618 (p_hwfn
->hw_info
.personality
== QED_PCI_ISCSI
) ? 1 : 0);
1619 STORE_RT_REG(p_hwfn
, PRS_REG_SEARCH_FCOE_RT_OFFSET
,
1620 (p_hwfn
->hw_info
.personality
== QED_PCI_FCOE
) ? 1 : 0);
1621 STORE_RT_REG(p_hwfn
, PRS_REG_SEARCH_ROCE_RT_OFFSET
, 0);
1623 /* Cleanup chip from previous driver if such remains exist */
1624 rc
= qed_final_cleanup(p_hwfn
, p_ptt
, rel_pf_id
, false);
1628 /* Sanity check before the PF init sequence that uses DMAE */
1629 rc
= qed_dmae_sanity(p_hwfn
, p_ptt
, "pf_phase");
1633 /* PF Init sequence */
1634 rc
= qed_init_run(p_hwfn
, p_ptt
, PHASE_PF
, rel_pf_id
, hw_mode
);
1638 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1639 rc
= qed_init_run(p_hwfn
, p_ptt
, PHASE_QM_PF
, rel_pf_id
, hw_mode
);
1643 /* Pure runtime initializations - directly to the HW */
1644 qed_int_igu_init_pure_rt(p_hwfn
, p_ptt
, true, true);
1646 rc
= qed_hw_init_pf_doorbell_bar(p_hwfn
, p_ptt
);
1651 /* enable interrupts */
1652 qed_int_igu_enable(p_hwfn
, p_ptt
, int_mode
);
1654 /* send function start command */
1655 rc
= qed_sp_pf_start(p_hwfn
, p_ptt
, p_tunn
,
1656 allow_npar_tx_switch
);
1658 DP_NOTICE(p_hwfn
, "Function start ramrod failed\n");
1661 if (p_hwfn
->hw_info
.personality
== QED_PCI_FCOE
) {
1662 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_TAG1
, BIT(2));
1663 qed_wr(p_hwfn
, p_ptt
,
1664 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST
,
1671 static int qed_change_pci_hwfn(struct qed_hwfn
*p_hwfn
,
1672 struct qed_ptt
*p_ptt
,
1675 u32 delay_idx
= 0, val
, set_val
= enable
? 1 : 0;
1677 /* Change PF in PXP */
1678 qed_wr(p_hwfn
, p_ptt
,
1679 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
, set_val
);
1681 /* wait until value is set - try for 1 second every 50us */
1682 for (delay_idx
= 0; delay_idx
< 20000; delay_idx
++) {
1683 val
= qed_rd(p_hwfn
, p_ptt
,
1684 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER
);
1688 usleep_range(50, 60);
1691 if (val
!= set_val
) {
1693 "PFID_ENABLE_MASTER wasn't changed after a second\n");
1700 static void qed_reset_mb_shadow(struct qed_hwfn
*p_hwfn
,
1701 struct qed_ptt
*p_main_ptt
)
1703 /* Read shadow of current MFW mailbox */
1704 qed_mcp_read_mb(p_hwfn
, p_main_ptt
);
1705 memcpy(p_hwfn
->mcp_info
->mfw_mb_shadow
,
1706 p_hwfn
->mcp_info
->mfw_mb_cur
, p_hwfn
->mcp_info
->mfw_mb_length
);
1710 qed_fill_load_req_params(struct qed_load_req_params
*p_load_req
,
1711 struct qed_drv_load_params
*p_drv_load
)
1713 memset(p_load_req
, 0, sizeof(*p_load_req
));
1715 p_load_req
->drv_role
= p_drv_load
->is_crash_kernel
?
1716 QED_DRV_ROLE_KDUMP
: QED_DRV_ROLE_OS
;
1717 p_load_req
->timeout_val
= p_drv_load
->mfw_timeout_val
;
1718 p_load_req
->avoid_eng_reset
= p_drv_load
->avoid_eng_reset
;
1719 p_load_req
->override_force_load
= p_drv_load
->override_force_load
;
1722 static int qed_vf_start(struct qed_hwfn
*p_hwfn
,
1723 struct qed_hw_init_params
*p_params
)
1725 if (p_params
->p_tunn
) {
1726 qed_vf_set_vf_start_tunn_update_param(p_params
->p_tunn
);
1727 qed_vf_pf_tunnel_param_update(p_hwfn
, p_params
->p_tunn
);
1730 p_hwfn
->b_int_enabled
= true;
1735 int qed_hw_init(struct qed_dev
*cdev
, struct qed_hw_init_params
*p_params
)
1737 struct qed_load_req_params load_req_params
;
1738 u32 load_code
, resp
, param
, drv_mb_param
;
1739 bool b_default_mtu
= true;
1740 struct qed_hwfn
*p_hwfn
;
1741 int rc
= 0, mfw_rc
, i
;
1744 if ((p_params
->int_mode
== QED_INT_MODE_MSI
) && (cdev
->num_hwfns
> 1)) {
1745 DP_NOTICE(cdev
, "MSI mode is not supported for CMT devices\n");
1750 rc
= qed_init_fw_data(cdev
, p_params
->bin_fw_data
);
1755 for_each_hwfn(cdev
, i
) {
1756 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[i
];
1758 /* If management didn't provide a default, set one of our own */
1759 if (!p_hwfn
->hw_info
.mtu
) {
1760 p_hwfn
->hw_info
.mtu
= 1500;
1761 b_default_mtu
= false;
1765 qed_vf_start(p_hwfn
, p_params
);
1769 /* Enable DMAE in PXP */
1770 rc
= qed_change_pci_hwfn(p_hwfn
, p_hwfn
->p_main_ptt
, true);
1772 rc
= qed_calc_hw_mode(p_hwfn
);
1776 if (IS_PF(cdev
) && (test_bit(QED_MF_8021Q_TAGGING
,
1778 test_bit(QED_MF_8021AD_TAGGING
,
1780 if (test_bit(QED_MF_8021Q_TAGGING
, &cdev
->mf_bits
))
1781 ether_type
= ETH_P_8021Q
;
1783 ether_type
= ETH_P_8021AD
;
1784 STORE_RT_REG(p_hwfn
, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET
,
1786 STORE_RT_REG(p_hwfn
, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET
,
1788 STORE_RT_REG(p_hwfn
, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET
,
1790 STORE_RT_REG(p_hwfn
, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET
,
1794 qed_fill_load_req_params(&load_req_params
,
1795 p_params
->p_drv_load_params
);
1796 rc
= qed_mcp_load_req(p_hwfn
, p_hwfn
->p_main_ptt
,
1799 DP_NOTICE(p_hwfn
, "Failed sending a LOAD_REQ command\n");
1803 load_code
= load_req_params
.load_code
;
1804 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
1805 "Load request was sent. Load code: 0x%x\n",
1808 qed_mcp_set_capabilities(p_hwfn
, p_hwfn
->p_main_ptt
);
1810 qed_reset_mb_shadow(p_hwfn
, p_hwfn
->p_main_ptt
);
1812 p_hwfn
->first_on_engine
= (load_code
==
1813 FW_MSG_CODE_DRV_LOAD_ENGINE
);
1815 switch (load_code
) {
1816 case FW_MSG_CODE_DRV_LOAD_ENGINE
:
1817 rc
= qed_hw_init_common(p_hwfn
, p_hwfn
->p_main_ptt
,
1818 p_hwfn
->hw_info
.hw_mode
);
1822 case FW_MSG_CODE_DRV_LOAD_PORT
:
1823 rc
= qed_hw_init_port(p_hwfn
, p_hwfn
->p_main_ptt
,
1824 p_hwfn
->hw_info
.hw_mode
);
1829 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
1830 rc
= qed_hw_init_pf(p_hwfn
, p_hwfn
->p_main_ptt
,
1832 p_hwfn
->hw_info
.hw_mode
,
1833 p_params
->b_hw_start
,
1835 p_params
->allow_npar_tx_switch
);
1839 "Unexpected load code [0x%08x]", load_code
);
1846 "init phase failed for loadcode 0x%x (rc %d)\n",
1849 /* ACK mfw regardless of success or failure of initialization */
1850 mfw_rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1851 DRV_MSG_CODE_LOAD_DONE
,
1852 0, &load_code
, ¶m
);
1856 DP_NOTICE(p_hwfn
, "Failed sending LOAD_DONE command\n");
1860 /* Check if there is a DID mismatch between nvm-cfg/efuse */
1861 if (param
& FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR
)
1863 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1865 /* send DCBX attention request command */
1868 "sending phony dcbx set command to trigger DCBx attention handling\n");
1869 mfw_rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1870 DRV_MSG_CODE_SET_DCBX
,
1871 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT
,
1872 &load_code
, ¶m
);
1875 "Failed to send DCBX attention request\n");
1879 p_hwfn
->hw_init_done
= true;
1883 p_hwfn
= QED_LEADING_HWFN(cdev
);
1885 /* Get pre-negotiated values for stag, bandwidth etc. */
1888 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
1889 drv_mb_param
= 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET
;
1890 rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1891 DRV_MSG_CODE_GET_OEM_UPDATES
,
1892 drv_mb_param
, &resp
, ¶m
);
1895 "Failed to send GET_OEM_UPDATES attention request\n");
1897 drv_mb_param
= STORM_FW_VERSION
;
1898 rc
= qed_mcp_cmd(p_hwfn
, p_hwfn
->p_main_ptt
,
1899 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER
,
1900 drv_mb_param
, &load_code
, ¶m
);
1902 DP_INFO(p_hwfn
, "Failed to update firmware version\n");
1904 if (!b_default_mtu
) {
1905 rc
= qed_mcp_ov_update_mtu(p_hwfn
, p_hwfn
->p_main_ptt
,
1906 p_hwfn
->hw_info
.mtu
);
1909 "Failed to update default mtu\n");
1912 rc
= qed_mcp_ov_update_driver_state(p_hwfn
,
1914 QED_OV_DRIVER_STATE_DISABLED
);
1916 DP_INFO(p_hwfn
, "Failed to update driver state\n");
1918 rc
= qed_mcp_ov_update_eswitch(p_hwfn
, p_hwfn
->p_main_ptt
,
1919 QED_OV_ESWITCH_NONE
);
1921 DP_INFO(p_hwfn
, "Failed to update eswitch mode\n");
1927 #define QED_HW_STOP_RETRY_LIMIT (10)
1928 static void qed_hw_timers_stop(struct qed_dev
*cdev
,
1929 struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1934 qed_wr(p_hwfn
, p_ptt
, TM_REG_PF_ENABLE_CONN
, 0x0);
1935 qed_wr(p_hwfn
, p_ptt
, TM_REG_PF_ENABLE_TASK
, 0x0);
1937 for (i
= 0; i
< QED_HW_STOP_RETRY_LIMIT
; i
++) {
1938 if ((!qed_rd(p_hwfn
, p_ptt
,
1939 TM_REG_PF_SCAN_ACTIVE_CONN
)) &&
1940 (!qed_rd(p_hwfn
, p_ptt
, TM_REG_PF_SCAN_ACTIVE_TASK
)))
1943 /* Dependent on number of connection/tasks, possibly
1944 * 1ms sleep is required between polls
1946 usleep_range(1000, 2000);
1949 if (i
< QED_HW_STOP_RETRY_LIMIT
)
1953 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
1954 (u8
)qed_rd(p_hwfn
, p_ptt
, TM_REG_PF_SCAN_ACTIVE_CONN
),
1955 (u8
)qed_rd(p_hwfn
, p_ptt
, TM_REG_PF_SCAN_ACTIVE_TASK
));
1958 void qed_hw_timers_stop_all(struct qed_dev
*cdev
)
1962 for_each_hwfn(cdev
, j
) {
1963 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[j
];
1964 struct qed_ptt
*p_ptt
= p_hwfn
->p_main_ptt
;
1966 qed_hw_timers_stop(cdev
, p_hwfn
, p_ptt
);
1970 int qed_hw_stop(struct qed_dev
*cdev
)
1972 struct qed_hwfn
*p_hwfn
;
1973 struct qed_ptt
*p_ptt
;
1977 for_each_hwfn(cdev
, j
) {
1978 p_hwfn
= &cdev
->hwfns
[j
];
1979 p_ptt
= p_hwfn
->p_main_ptt
;
1981 DP_VERBOSE(p_hwfn
, NETIF_MSG_IFDOWN
, "Stopping hw/fw\n");
1984 qed_vf_pf_int_cleanup(p_hwfn
);
1985 rc
= qed_vf_pf_reset(p_hwfn
);
1988 "qed_vf_pf_reset failed. rc = %d.\n",
1995 /* mark the hw as uninitialized... */
1996 p_hwfn
->hw_init_done
= false;
1998 /* Send unload command to MCP */
1999 rc
= qed_mcp_unload_req(p_hwfn
, p_ptt
);
2002 "Failed sending a UNLOAD_REQ command. rc = %d.\n",
2007 qed_slowpath_irq_sync(p_hwfn
);
2009 /* After this point no MFW attentions are expected, e.g. prevent
2010 * race between pf stop and dcbx pf update.
2012 rc
= qed_sp_pf_stop(p_hwfn
);
2015 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
2020 qed_wr(p_hwfn
, p_ptt
,
2021 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF
, 0x1);
2023 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_TCP
, 0x0);
2024 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_UDP
, 0x0);
2025 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_FCOE
, 0x0);
2026 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_ROCE
, 0x0);
2027 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_OPENFLOW
, 0x0);
2029 qed_hw_timers_stop(cdev
, p_hwfn
, p_ptt
);
2031 /* Disable Attention Generation */
2032 qed_int_igu_disable_int(p_hwfn
, p_ptt
);
2034 qed_wr(p_hwfn
, p_ptt
, IGU_REG_LEADING_EDGE_LATCH
, 0);
2035 qed_wr(p_hwfn
, p_ptt
, IGU_REG_TRAILING_EDGE_LATCH
, 0);
2037 qed_int_igu_init_pure_rt(p_hwfn
, p_ptt
, false, true);
2039 /* Need to wait 1ms to guarantee SBs are cleared */
2040 usleep_range(1000, 2000);
2042 /* Disable PF in HW blocks */
2043 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_DB_ENABLE
, 0);
2044 qed_wr(p_hwfn
, p_ptt
, QM_REG_PF_EN
, 0);
2046 qed_mcp_unload_done(p_hwfn
, p_ptt
);
2049 "Failed sending a UNLOAD_DONE command. rc = %d.\n",
2056 p_hwfn
= QED_LEADING_HWFN(cdev
);
2057 p_ptt
= QED_LEADING_HWFN(cdev
)->p_main_ptt
;
2059 /* Disable DMAE in PXP - in CMT, this should only be done for
2060 * first hw-function, and only after all transactions have
2061 * stopped for all active hw-functions.
2063 rc
= qed_change_pci_hwfn(p_hwfn
, p_ptt
, false);
2066 "qed_change_pci_hwfn failed. rc = %d.\n", rc
);
2074 int qed_hw_stop_fastpath(struct qed_dev
*cdev
)
2078 for_each_hwfn(cdev
, j
) {
2079 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[j
];
2080 struct qed_ptt
*p_ptt
;
2083 qed_vf_pf_int_cleanup(p_hwfn
);
2086 p_ptt
= qed_ptt_acquire(p_hwfn
);
2091 NETIF_MSG_IFDOWN
, "Shutting down the fastpath\n");
2093 qed_wr(p_hwfn
, p_ptt
,
2094 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF
, 0x1);
2096 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_TCP
, 0x0);
2097 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_UDP
, 0x0);
2098 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_FCOE
, 0x0);
2099 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_ROCE
, 0x0);
2100 qed_wr(p_hwfn
, p_ptt
, PRS_REG_SEARCH_OPENFLOW
, 0x0);
2102 qed_int_igu_init_pure_rt(p_hwfn
, p_ptt
, false, false);
2104 /* Need to wait 1ms to guarantee SBs are cleared */
2105 usleep_range(1000, 2000);
2106 qed_ptt_release(p_hwfn
, p_ptt
);
2112 int qed_hw_start_fastpath(struct qed_hwfn
*p_hwfn
)
2114 struct qed_ptt
*p_ptt
;
2116 if (IS_VF(p_hwfn
->cdev
))
2119 p_ptt
= qed_ptt_acquire(p_hwfn
);
2123 if (p_hwfn
->p_rdma_info
&&
2124 p_hwfn
->p_rdma_info
->active
&& p_hwfn
->b_rdma_enabled_in_prs
)
2125 qed_wr(p_hwfn
, p_ptt
, p_hwfn
->rdma_prs_search_reg
, 0x1);
2127 /* Re-open incoming traffic */
2128 qed_wr(p_hwfn
, p_ptt
, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF
, 0x0);
2129 qed_ptt_release(p_hwfn
, p_ptt
);
2134 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
2135 static void qed_hw_hwfn_free(struct qed_hwfn
*p_hwfn
)
2137 qed_ptt_pool_free(p_hwfn
);
2138 kfree(p_hwfn
->hw_info
.p_igu_info
);
2139 p_hwfn
->hw_info
.p_igu_info
= NULL
;
2142 /* Setup bar access */
2143 static void qed_hw_hwfn_prepare(struct qed_hwfn
*p_hwfn
)
2145 /* clear indirect access */
2146 if (QED_IS_AH(p_hwfn
->cdev
)) {
2147 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2148 PGLUE_B_REG_PGL_ADDR_E8_F0_K2
, 0);
2149 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2150 PGLUE_B_REG_PGL_ADDR_EC_F0_K2
, 0);
2151 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2152 PGLUE_B_REG_PGL_ADDR_F0_F0_K2
, 0);
2153 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2154 PGLUE_B_REG_PGL_ADDR_F4_F0_K2
, 0);
2156 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2157 PGLUE_B_REG_PGL_ADDR_88_F0_BB
, 0);
2158 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2159 PGLUE_B_REG_PGL_ADDR_8C_F0_BB
, 0);
2160 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2161 PGLUE_B_REG_PGL_ADDR_90_F0_BB
, 0);
2162 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2163 PGLUE_B_REG_PGL_ADDR_94_F0_BB
, 0);
2166 /* Clean Previous errors if such exist */
2167 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2168 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR
, 1 << p_hwfn
->abs_pf_id
);
2170 /* enable internal target-read */
2171 qed_wr(p_hwfn
, p_hwfn
->p_main_ptt
,
2172 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ
, 1);
2175 static void get_function_id(struct qed_hwfn
*p_hwfn
)
2178 p_hwfn
->hw_info
.opaque_fid
= (u16
) REG_RD(p_hwfn
,
2179 PXP_PF_ME_OPAQUE_ADDR
);
2181 p_hwfn
->hw_info
.concrete_fid
= REG_RD(p_hwfn
, PXP_PF_ME_CONCRETE_ADDR
);
2183 p_hwfn
->abs_pf_id
= (p_hwfn
->hw_info
.concrete_fid
>> 16) & 0xf;
2184 p_hwfn
->rel_pf_id
= GET_FIELD(p_hwfn
->hw_info
.concrete_fid
,
2185 PXP_CONCRETE_FID_PFID
);
2186 p_hwfn
->port_id
= GET_FIELD(p_hwfn
->hw_info
.concrete_fid
,
2187 PXP_CONCRETE_FID_PORT
);
2189 DP_VERBOSE(p_hwfn
, NETIF_MSG_PROBE
,
2190 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
2191 p_hwfn
->hw_info
.concrete_fid
, p_hwfn
->hw_info
.opaque_fid
);
2194 static void qed_hw_set_feat(struct qed_hwfn
*p_hwfn
)
2196 u32
*feat_num
= p_hwfn
->hw_info
.feat_num
;
2197 struct qed_sb_cnt_info sb_cnt
;
2200 memset(&sb_cnt
, 0, sizeof(sb_cnt
));
2201 qed_int_get_num_sbs(p_hwfn
, &sb_cnt
);
2203 if (IS_ENABLED(CONFIG_QED_RDMA
) &&
2204 QED_IS_RDMA_PERSONALITY(p_hwfn
)) {
2205 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
2206 * the status blocks equally between L2 / RoCE but with
2207 * consideration as to how many l2 queues / cnqs we have.
2209 feat_num
[QED_RDMA_CNQ
] =
2210 min_t(u32
, sb_cnt
.cnt
/ 2,
2211 RESC_NUM(p_hwfn
, QED_RDMA_CNQ_RAM
));
2213 non_l2_sbs
= feat_num
[QED_RDMA_CNQ
];
2215 if (QED_IS_L2_PERSONALITY(p_hwfn
)) {
2216 /* Start by allocating VF queues, then PF's */
2217 feat_num
[QED_VF_L2_QUE
] = min_t(u32
,
2218 RESC_NUM(p_hwfn
, QED_L2_QUEUE
),
2220 feat_num
[QED_PF_L2_QUE
] = min_t(u32
,
2221 sb_cnt
.cnt
- non_l2_sbs
,
2228 if (QED_IS_FCOE_PERSONALITY(p_hwfn
))
2229 feat_num
[QED_FCOE_CQ
] = min_t(u32
, sb_cnt
.cnt
,
2233 if (QED_IS_ISCSI_PERSONALITY(p_hwfn
))
2234 feat_num
[QED_ISCSI_CQ
] = min_t(u32
, sb_cnt
.cnt
,
2239 "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
2240 (int)FEAT_NUM(p_hwfn
, QED_PF_L2_QUE
),
2241 (int)FEAT_NUM(p_hwfn
, QED_VF_L2_QUE
),
2242 (int)FEAT_NUM(p_hwfn
, QED_RDMA_CNQ
),
2243 (int)FEAT_NUM(p_hwfn
, QED_FCOE_CQ
),
2244 (int)FEAT_NUM(p_hwfn
, QED_ISCSI_CQ
),
const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
	switch (res_id) {
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_QUEUE:
		return "LL2_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	case QED_BDQ:
		return "BDQ";
	case QED_SB:
		return "SB";
	default:
		return "UNKNOWN_RESOURCE";
	}
}

static int
__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_resources res_id,
			    u32 resc_max_val, u32 *p_mcp_resp)
{
	int rc;

	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
				      resc_max_val, p_mcp_resp);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for a max value setting of resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_INFO(p_hwfn,
			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
			res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);

	return 0;
}

static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
		case QED_LL2_QUEUE:
			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
			break;
		case QED_RDMA_CNQ_RAM:
			/* No need for a case for QED_CMDQS_CQS since
			 * CNQ/CMDQS are the same resource.
			 */
			resc_max_val = NUM_OF_GLOBAL_QUEUES;
			break;
		case QED_RDMA_STATS_QUEUE:
			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
					    : RDMA_NUM_STATISTIC_COUNTERS_BB;
			break;
		case QED_BDQ:
			resc_max_val = BDQ_NUM_RESOURCES;
			break;
		default:
			continue;
		}

		rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
						 resc_max_val, &mcp_resp);
		if (rc)
			return rc;

		/* There's no point to continue to the next resource if the
		 * command is not supported by the MFW.
		 * We do continue if the command is supported but the resource
		 * is unknown to the MFW. Such a resource will be later
		 * configured with the default allocation values.
		 */
		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
			return -EINVAL;
	}

	return 0;
}

int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
			 enum qed_resources res_id,
			 u32 *p_resc_num, u32 *p_resc_start)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);

	switch (res_id) {
	case QED_L2_QUEUE:
		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
		break;
	case QED_VPORT:
		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
			       MAX_NUM_VPORTS_BB) / num_funcs;
		break;
	case QED_RSS_ENG:
		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
		break;
	case QED_PQ:
		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
			       MAX_QM_TX_QUEUES_BB) / num_funcs;
		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
		break;
	case QED_RL:
		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
		break;
	case QED_LL2_QUEUE:
		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
		break;
	case QED_BDQ:
		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
		    p_hwfn->hw_info.personality != QED_PCI_FCOE)
			*p_resc_num = 0;
		else
			*p_resc_num = 1;
		break;
	case QED_SB:
		/* Since we want its value to reflect whether MFW supports
		 * the new scheme, have a default of 0.
		 */
		*p_resc_num = 0;
		break;
	default:
		return -EINVAL;
	}

	switch (res_id) {
	case QED_BDQ:
		if (!*p_resc_num)
			*p_resc_start = 0;
		else if (p_hwfn->cdev->num_ports_in_engine == 4)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			*p_resc_start = p_hwfn->port_id + 2;
		break;
	default:
		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
		break;
	}

	return 0;
}

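/* Example (hypothetical numbers): on a BB device with num_funcs = 4, the
 * QED_PQ default is MAX_QM_TX_QUEUES_BB / 4 rounded down to a multiple of 8,
 * and each function's default start offset follows
 *	*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx,
 * so function 2 begins right after the blocks owned by functions 0 and 1.
 */
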
static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
				  enum qed_resources res_id)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0;
	u32 mcp_resp, *p_resc_num, *p_resc_start;
	int rc;

	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				  &dflt_resc_start);
	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				   &mcp_resp, p_resc_num, p_resc_start);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		DP_INFO(p_hwfn,
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
			res_id,
			qed_hw_get_resc_name(res_id),
			mcp_resp, dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
	}

	/* PQs have to divide by 8 [that's the HW granularity].
	 * Reduce number so it would fit.
	 */
	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num,
			(*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return 0;
}

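/* Worked example of the PQ alignment above (hypothetical values): if the MFW
 * returns num = 0x1a (26) and start = 0x0c (12), both are masked with ~0x7,
 * giving num = 0x18 (24) and start = 0x08 (8), i.e. the range is shrunk and
 * shifted to the enclosing multiples of 8 that the QM hardware can address.
 */
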
static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
	int rc;
	u8 res_id;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		rc = __qed_hw_set_resc_info(p_hwfn, res_id);
		if (rc)
			return rc;
	}

	return 0;
}

static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u8 res_id;
	int rc;

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!rc && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	rc = qed_hw_set_resc_info(p_hwfn);
	if (rc)
		goto unlock_and_exit;

	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	/* This will also learn the number of SBs from MFW */
	if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
		return -EINVAL;

	qed_hw_set_feat(p_hwfn);

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
	return rc;
}

static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	struct qed_mcp_link_capabilities *p_caps;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (notice this is just the offset, not the offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	p_caps = &p_hwfn->mcp_info->link_capabilities;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

	link_temp = link->speed.advertised_speeds;
	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

	p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
		link->speed.autoneg;

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
				   offsetof(struct nvm_cfg1_port, ext_phy));
		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
		p_caps->default_eee = QED_MCP_EEE_ENABLED;
		link->eee.enable = true;
		switch (link_temp) {
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
			p_caps->default_eee = QED_MCP_EEE_DISABLED;
			link->eee.enable = false;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
			p_caps->eee_lpi_timer =
			    EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
			break;
		}

		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
		link->eee.tx_lpi_enable = link->eee.enable;
		link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
	} else {
		p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
		   link->speed.forced_speed,
		   link->speed.advertised_speeds,
		   link->speed.autoneg,
		   link->pause.autoneg,
		   p_caps->default_eee, p_caps->eee_lpi_timer);

	if (IS_LEAD_HWFN(p_hwfn)) {
		struct qed_dev *cdev = p_hwfn->cdev;

		/* Read Multi-function information from shmem */
		addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, generic_cont0);

		generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

		mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
			  NVM_CFG1_GLOB_MF_MODE_OFFSET;

		switch (mf_mode) {
		case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
			break;
		case NVM_CFG1_GLOB_MF_MODE_UFP:
			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_UFP_SPECIFIC) |
					BIT(QED_MF_8021Q_TAGGING);
			break;
		case NVM_CFG1_GLOB_MF_MODE_BD:
			cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_8021AD_TAGGING);
			break;
		case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_LL2_NON_UNICAST) |
					BIT(QED_MF_INTER_PF_SWITCH);
			break;
		case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
			cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
					BIT(QED_MF_LLH_PROTO_CLSS) |
					BIT(QED_MF_LL2_NON_UNICAST);
			if (QED_IS_BB(p_hwfn->cdev))
				cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
			break;
		}

		DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
			cdev->mf_bits);
	}

	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
		p_hwfn->cdev->mf_bits);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		__set_bit(QED_DEV_CAP_FCOE,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
	struct qed_dev *cdev = p_hwfn->cdev;

	num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_IS_BB(cdev)) {
			if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

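/* Illustrative note (hypothetical register value): if bit 0 of
 * MISCS_REG_FUNCTION_HIDE is set, the remaining bits are valid and every
 * function whose bit is '0' within eng_mask is enabled. The first loop above
 * simply counts those zero bits to obtain num_funcs, while the second loop
 * subtracts the hidden lower-numbered PFs from rel_pf_id to derive this PF's
 * enabled_func_idx.
 */
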
static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port_mode;

	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engine = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engine = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engine);

		/* Default num_ports_in_engine to something */
		p_hwfn->cdev->num_ports_in_engine = 1;
	}
}

static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port;
	int i;

	p_hwfn->cdev->num_ports_in_engine = 0;

	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
		port = qed_rd(p_hwfn, p_ptt,
			      CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
		if (port & 1)
			p_hwfn->cdev->num_ports_in_engine++;
	}

	if (!p_hwfn->cdev->num_ports_in_engine) {
		DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");

		/* Default num_ports_in_engine to something */
		p_hwfn->cdev->num_ports_in_engine = 1;
	}
}

static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (QED_IS_BB(p_hwfn->cdev))
		qed_hw_info_port_num_bb(p_hwfn, p_ptt);
	else
		qed_hw_info_port_num_ah(p_hwfn, p_ptt);
}

static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_capabilities *p_caps;
	u32 eee_status;

	p_caps = &p_hwfn->mcp_info->link_capabilities;
	if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
		return;

	p_caps->eee_speed_caps = 0;
	eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
			EEE_SUPPORTED_SPEED_OFFSET;

	if (eee_status & EEE_1G_SUPPORTED)
		p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
	if (eee_status & EEE_10G_ADV)
		p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	int rc;

	/* Since all information is common, only the first hwfn should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	qed_hw_info_port_num(p_hwfn, p_ptt);

	qed_mcp_get_capabilities(p_hwfn, p_ptt);

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);

		qed_get_eee_caps(p_hwfn, p_ptt);

		qed_mcp_read_ufp_config(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
		p_hwfn->hw_info.multi_tc_roce_en = 1;

	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
	p_hwfn->hw_info.num_active_tc = 1;

	qed_get_num_funcs(p_hwfn, p_ptt);

	if (qed_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	return qed_hw_get_resc(p_hwfn, p_ptt);
}

static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 device_id_mask;
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);

	/* Determine type */
	device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
	switch (device_id_mask) {
	case QED_DEV_ID_MASK_BB:
		cdev->type = QED_DEV_TYPE_BB;
		break;
	case QED_DEV_ID_MASK_AH:
		cdev->type = QED_DEV_TYPE_AH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
		return -EBUSY;
	}

	cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);

	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		QED_IS_BB(cdev) ? "BB" : "AH",
		'A' + cdev->chip_rev,
		(int)cdev->chip_metal,
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	return 0;
}

static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->nvm_info.image_att);
	p_hwfn->nvm_info.image_att = NULL;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 u64 db_phys_addr,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;
	p_hwfn->db_phys_addr = db_phys_addr;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		goto err0;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
	 * is called as it sets the ports number in an engine.
	 */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
	}

	/* NVRAM info initialization and population */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_mcp_nvm_info_populate(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to populate nvm info shadow\n");
			goto err2;
		}
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		goto err3;

	return rc;
err3:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_nvm_info_free(p_hwfn);
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells,
				   cdev->db_phys_addr, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u64 db_phys_addr;
		u32 offset;

		/* adjust bar offset for second engine */
		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_0) / 2;
		p_regview = cdev->regview + offset;

		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_1) / 2;

		p_doorbell = cdev->doorbells + offset;

		db_phys_addr = cdev->db_phys_addr + offset;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, db_phys_addr,
					   personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_nvm_info_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	if (IS_PF(cdev))
		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);

	qed_nvm_info_free(p_hwfn);
}

static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct qed_chain_next *p_next;
	u32 size, i;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}

static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_chain->p_virt_addr, p_chain->p_phys_addr);
}

static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;

	if (!pp_addr_tbl)
		return;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
			break;

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  pp_addr_tbl[i].virt_addr,
				  pp_addr_tbl[i].dma_map);
	}

	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

	if (!p_chain->b_external_pbl)
		dma_free_coherent(&cdev->pdev->dev,
				  pbl_size,
				  p_chain->pbl_sp.p_virt_table,
				  p_chain->pbl_sp.p_phys_table);

	vfree(p_chain->pbl.pp_addr_tbl);
	p_chain->pbl.pp_addr_tbl = NULL;
}

void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, p_chain);
		break;
	}
}

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of u32 type.
	 */
	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
	     chain_size > ((u32)U16_MAX + 1)) ||
	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
		DP_NOTICE(cdev,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  chain_size);
		return -EINVAL;
	}

	return 0;
}

static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void *p_virt = NULL, *p_virt_prev = NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		} else {
			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						     p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				     p_chain->p_virt_addr,
				     p_chain->p_phys_addr);

	return 0;
}

static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);

	return 0;
}

static int
qed_chain_alloc_pbl(struct qed_dev *cdev,
		    struct qed_chain *p_chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	struct addr_tbl_entry *pp_addr_tbl;
	u8 *p_pbl_virt = NULL;
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_addr_tbl);
	pp_addr_tbl = vzalloc(size);
	if (!pp_addr_tbl)
		return -ENOMEM;

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

	if (!ext_pbl) {
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys, GFP_KERNEL);
	} else {
		p_pbl_virt = ext_pbl->p_pbl_virt;
		p_pbl_phys = ext_pbl->p_pbl_phys;
		p_chain->b_external_pbl = true;
	}

	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
	if (!p_pbl_virt)
		return -ENOMEM;

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
		p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	return 0;
}

int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	int rc = 0;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
			      mode, cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
		break;
	}
	if (!rc)
		return 0;

	qed_chain_free(cdev, p_chain);
	return rc;
}

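/* Minimal usage sketch (hypothetical caller, error handling omitted):
 *
 *	struct qed_chain chain;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     256, sizeof(struct eth_rx_bd), &chain, NULL);
 *	...
 *	qed_chain_free(cdev, &chain);
 *
 * Passing a NULL ext_pbl makes qed_chain_alloc_pbl() allocate the PBL table
 * itself; a caller with an externally managed PBL passes a filled
 * struct qed_chain_ext_pbl instead.
 */
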
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}

static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
				  u8 *p_filter)
{
	*p_high = p_filter[1] | (p_filter[0] << 8);
	*p_low = p_filter[5] | (p_filter[4] << 8) |
		 (p_filter[3] << 16) | (p_filter[2] << 24);
}

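/* Worked example (hypothetical MAC aa:bb:cc:dd:ee:ff): the helper above packs
 * the address into two register words as
 *	high = 0x0000aabb	(bytes 0-1)
 *	low  = 0xccddeeff	(bytes 2-5)
 * which is the pair written into the NIG LLH filter-value registers below.
 */
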
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
		return 0;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "mac: %pM is added at %d\n",
		   p_filter, i);

	return 0;
}

void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0;
	int i;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
		return;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "mac: %pM is removed from %d\n",
			   p_filter, i);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}

int
qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u16 source_port_or_eth_type,
			    u16 dest_port, enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
		return 0;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}
	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 1 << type);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "ETH type %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src port %x is added at %d\n",
			   source_port_or_eth_type, i);
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP dst port %x is added at %d\n", dest_port, i);
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "TCP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "UDP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, i);
		break;
	}
	return 0;
}

void
qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 source_port_or_eth_type,
			       u16 dest_port,
			       enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0;
	int i;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
		return;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Non valid LLH protocol filter type %d\n", type);
		return;
	}

	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
			continue;
		if (!qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
			continue;
		if (!(qed_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
			     i * sizeof(u32)) & BIT(type)))
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}

static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_eth_qzone, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}

int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
{
	struct qed_queue_cid *p_cid = p_handle;
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_hwfn = p_cid->p_owner;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	if (rx_coal) {
		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
	}

	if (tx_coal) {
		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
		if (rc)
			goto out;
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
	}
out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
out:
	return rc;
}

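/* Worked example of the timer_res/timeset split above (hypothetical request):
 * coalesce = 200 usec does not fit in the 7-bit timeset, so timer_res = 1 is
 * selected and timeset = 200 >> 1 = 100; the hardware then sees approximately
 * timeset << timer_res = 200 usec again.
 */
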
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
out:
	return rc;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
					    min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

/* This function performs several validations for WFQ
 * configuration and required min rate for a given vport
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}

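/* Numeric illustration of the checks above (hypothetical setup, assuming
 * QED_WFQ_UNIT corresponds to one-percent granularity, i.e. 100): with
 * min_pf_rate = 10000 Mbps and num_vports = 8, a request of req_rate = 50 Mbps
 * is rejected because it is below one percent (100 Mbps), and the sum of all
 * explicitly configured min rates must stay under 10000 Mbps so the remaining
 * vports still get at least one percent each from the leftover bandwidth.
 */
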
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (!rc)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}

static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}

/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps needs to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
	}
}

int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}

int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}

int qed_device_num_engines(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? 2 : 1;
}

static int qed_device_num_ports(struct qed_dev *cdev)
{
	/* in CMT always only one port */
	if (cdev->num_hwfns > 1)
		return 1;

	return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
}

int qed_device_get_port_id(struct qed_dev *cdev)
{
	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
}

void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

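/* Worked example (hypothetical MAC aa:bb:cc:dd:ee:ff): the byte swaps above
 * store {0xbb, 0xaa} in fw_msb, {0xdd, 0xcc} in fw_mid and {0xff, 0xee} in
 * fw_lsb, so each __le16, read as a little-endian value, yields
 * 0xaabb / 0xccdd / 0xeeff respectively.
 */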