/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
/* general constants */
#define QM_PQ_MEM_4KB(pq_size)		(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE,	\
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size,	\
							0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		43750000
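/* Note: QM_WFQ_INC_VAL(1) is 0x9000 (36864) credits, so QM_WFQ_MAX_INC_VAL
 * effectively limits usable WFQ weights to roughly 43750000 / 36864 ~= 1186;
 * the WFQ init helpers below reject a zero increment or one above this max.
 */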
/* RL constants */
#define QM_RL_UPPER_BOUND		62500000
#define QM_RL_PERIOD			5 /* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL		43750000
#define QM_RL_INC_VAL(rate)		max_t(u32,			\
					      (u32)(((rate ? rate :	\
						      1000000) *	\
						     QM_RL_PERIOD *	\
						     101) / (8 * 100)), 1)
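/* Note: QM_RL_INC_VAL converts a rate (nominally in Mb/s) into a byte credit
 * added per QM_RL_PERIOD, padded by ~1% via the 101/100 factor; a zero rate
 * falls back to a large default and the result is clamped to at least one
 * credit.
 */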
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_LINES_RT_OFFSET(voq)	(				\
		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq *	\
		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -		\
		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)	(			\
		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq *		\
		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -		\
		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines)	((((pbf_cmd_lines) -		\
					   4) *				\
					  2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
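/* Note: the stop command occupies two dwords: dword 0 holds the 32-bit PQ
 * pause mask, while dword 1 holds the 4-bit group ID at bit 16 and the PQ
 * type at bit 24. The _OFFSET/_SHIFT/_MASK triplets above are consumed by
 * QM_CMD_SET_FIELD() in qed_send_qm_stop_cmd().
 */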
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)		cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value)			\
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET],			\
		  cmd ## _ ## field,					\
		  value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *		\
						   (max_phys_tcs_per_port) + \
						   (tc))
#define LB_VOQ(port)		(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port)				\
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) :	\
			LB_VOQ(port))
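/* Note: VOQ numbering assumed by these macros: each port owns
 * max_phys_tcs_per_port consecutive physical VOQs (one per physical TC),
 * and the pure loopback TC of every port maps to a dedicated VOQ placed
 * after all physical VOQs, at MAX_PHYS_VOQS + port.
 */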
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
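/* Note the shared pattern in the four enable helpers above: the enable flag
 * itself is always written, while the matching QM-bypass credit threshold
 * (the RL or WFQ upper bound) is programmed only when both the feature and
 * QM_BYPASS_EN are set.
 */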
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	/* In A0 - Limit the size of pbf queue so that only 511 commands with
	 * the minimum size of 4 (FCoE minimum size) can be inserted.
	 */
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

	if (is_bb_a0)
		cmdq_lines = min_t(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}

			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}

			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}
/* Prepare runtime init values to allocate guaranteed BTB blocks. */
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}
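/* Example (hypothetical numbers): with 1000 usable blocks and 4 active TCs,
 * pure_lb_blocks = max(38, (1000 * 10 / 47) / 10) = 38 and each physical TC
 * is guaranteed (1000 - 38) / 4 = 240 blocks.
 */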
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  p_params->pq_params[i].rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  p_params->pq_params[i].rl_valid ?
			  p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			u32 addr;

			addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
			STORE_RT_REG(p_hwfn, addr,
				     tx_pq_vf_mask[i]);
		}
	}
}
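/* The loop above lays the PQs out back to back in 4 KB units starting at
 * base_mem_addr_4kb: a PF PQ advances the cursor by
 * QM_PQ_MEM_4KB(num_pf_cids) and a VF PQ by QM_PQ_MEM_4KB(num_vf_cids),
 * while tx_pq_vf_mask records which PQs belong to VFs so the per-group
 * size-select registers can be programmed at the end.
 */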
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ size */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
				 (p_params->pf_id % MAX_NUM_PFS_BB);

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0;
	     i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
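/* SDM command handshake used above: wait for QM_REG_SDMCMDREADY, program the
 * command address and both data dwords, pulse QM_REG_SDMCMDGO (1 then 0),
 * then poll QM_REG_SDMCMDREADY again before reporting success.
 */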
/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
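/* The PF memory size is the sum of three regions: PF Tx PQs, VF Tx PQs and
 * the QM_OTHER_PQS_PER_PF "Other" PQs, each rounded up to 4 KB units by
 * QM_PQ_MEM_4KB().
 */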
int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}
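/* PF runtime init order used above: reset the per-VPORT first-PQ table, map
 * the Other PQs at offset 0, map the Tx PQs right after them (at
 * other_mem_size_4kb), then apply PF WFQ, PF RL, VPORT WFQ and VPORT RL,
 * returning -1 on the first invalid parameter.
 */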
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	u8 tc;

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];

		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}

	return 0;
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
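/* Example: stopping PQs 30..34 issues two commands, one for group 0 with
 * pause-mask bits 30 and 31 set and one for group 1 with bits 0..2 set,
 * since the mask is flushed whenever pq_id reaches the end of a 32-PQ group
 * or the last requested PQ.
 */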
static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	/* update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	/* update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* update DORQ register */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}
void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	/* update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	/* update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* update DORQ registers */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	/* update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	/* update NIG registers */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}