/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

/* general constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
#define QM_INVALID_PQ_ID	0xffff
/* feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1
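/* Worked example (illustrative values, not from the original source):
 * a PQ sized for 2048 CIDs needs (2048 + 1) * QM_PQ_ELEMENT_SIZE = 8196
 * bytes, so QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP(8196, 0x1000) = 3 pages
 * of 4KB. QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1 = 7,
 * i.e. the size is programmed in 256B units, presumably minus one
 * because the register holds size - 1.
 */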
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF	4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND	6250000
#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
#define QM_WFQ_VP_PQ_PF_SHIFT	5
#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL	4375000
#define QM_WFQ_INIT_CRD(inc_val)	(2 * (inc_val))
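/* Worked example (illustrative): a PF WFQ weight of 10 yields
 * QM_WFQ_INC_VAL(10) = 10 * 0x9000 = 368640 credits per arbitration
 * round. Since QM_WFQ_MAX_INC_VAL is 4375000, the largest weight the
 * WFQ init functions below accept is 4375000 / 0x9000 = 118.
 */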
/* RL constants */
#define QM_RL_UPPER_BOUND	6250000
#define QM_RL_PERIOD	5 /* in us */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate)	max_t(u32, \
				      (((rate ? rate : 1000000) * \
					QM_RL_PERIOD) / 8), 1)
#define QM_RL_MAX_INC_VAL	4375000
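/* Worked example (illustrative): QM_RL_INC_VAL(100000) =
 * max((100000 * QM_RL_PERIOD) / 8, 1) = 62500 credits added each
 * rate-limiter period, and a rate of 0 falls back to the 1000000
 * default. QM_RL_PERIOD_CLK_25M converts the 5us period into
 * 25 * 5 = 125 cycles of a 25MHz clock.
 */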
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1
#define EAGLE_WORKAROUND_TC	7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES	150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES	8
#define PBF_CMDQ_LINES_RT_OFFSET(voq)	( \
	PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
	 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)	( \
	PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
	(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
	 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines)	((((pbf_cmd_lines) - 4) * \
					  2) | QM_LINE_CRD_REG_SIGN_BIT)
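/* Worked example (illustrative): with the default
 * PBF_CMDQ_PURE_LB_LINES = 150, QM_VOQ_LINE_CRD(150) ORs
 * (150 - 4) * 2 = 292 with QM_LINE_CRD_REG_SIGN_BIT, so the VOQ
 * starts with 292 line credits and the sign bit set, matching the
 * signed-credit register format used throughout this file.
 */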
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS	38
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_EAGLE_WORKAROUND_BLOCKS	4
#define BTB_PURE_LB_FACTOR	10
#define BTB_PURE_LB_RATIO	7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH	32
#define QM_STOP_CMD_ADDR	0x2
#define QM_STOP_CMD_STRUCT_SIZE	2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
						 (max_phy_tcs_pr_port) + (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : \
			LB_VOQ(port))
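/* Worked example (illustrative): with max_phy_tcs_pr_port = 4 and
 * assuming tc 2 is a physical TC (tc < LB_TC), VOQ(1, 2, 4) =
 * PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the pure loopback TC of
 * the same port maps past the physical range to LB_VOQ(1) =
 * MAX_PHYS_VOQS + 1.
 */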
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
			     bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
			      bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
				bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
				 bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq,
				       u16 cmdq_lines)
{
	u32 qm_line_crd;

	/* In A0 - Limit the size of pbf queue so that only 511 commands with
	 * the minimum size of 4 (FCoE minimum size)
	 */
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

	if (is_bb_a0)
		cmdq_lines = min_t(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;
			u8 phys_tcs = port_params[port_id].num_active_phys_tcs;

			/* find #lines to divide between the active
			 * physical TCs
			 */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			phys_lines_per_tc = phys_lines / phys_tcs;
			/* init registers per active TC */
			for (tc = 0; tc < phys_tcs; tc++) {
				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}
			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}

static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;
		u8 phys_tcs;

		if (!port_params[port_id].active)
			continue;

		phys_tcs = port_params[port_id].num_active_phys_tcs;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC. use factor to avoid
		 * floating arithmetic.
		 */
		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (phys_tcs * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;

		/* init physical TCs */
		for (tc = 0; tc < phys_tcs; tc++) {
			voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}

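/* Worked example of the fixed-point split above (illustrative values):
 * with usable_blocks = 1000 and phys_tcs = 4,
 * pure_lb_blocks = (1000 * 10) / (4 * 10 + 7) = 212, scaled back down
 * to 212 / 10 = 21 and then clamped up to BTB_JUMBO_PKT_BLOCKS = 38;
 * each physical TC is then guaranteed (1000 - 38) / 4 = 240 blocks.
 * The BTB_PURE_LB_FACTOR scaling keeps the arithmetic in integers while
 * effectively weighting pure LB as 0.7 of a physical TC share.
 */
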
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  is_vf_pq ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID,
			  first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  is_vf_pq ? p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			if (is_bb_a0) {
				u32 curr_mask = 0, addr;

				addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
				if (!p_params->is_first_pf)
					curr_mask = qed_rd(p_hwfn, p_ptt,
							   addr);

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;

				STORE_RT_REG(p_hwfn, addr,
					     curr_mask | tx_pq_vf_mask[i]);
			} else {
				u32 addr;

				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
				STORE_RT_REG(p_hwfn, addr,
					     tx_pq_vf_mask[i]);
			}
		}
	}
}

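/* Worked example of the VF mask indexing above (illustrative): with a
 * mask width of 32 (the A0 value), a VF PQ with pq_id = 70 sets bit
 * 70 % 32 = 6 in tx_pq_vf_mask[70 / 32] = tx_pq_vf_mask[2], which the
 * loop above then writes to the size select register of that PQ group.
 */
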
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids,
				     u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
				 (p_params->pf_id % MAX_NUM_PFS_BB);

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_INIT_CRD(inc_val) |
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
			     u8 pf_id,
			     u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0;
	     i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr,
			    u32 cmd_data_lsb,
			    u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids,
		       u16 num_pf_pqs,
		       u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

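/* Worked example (illustrative values): with num_pf_cids = 2048,
 * num_vf_cids = 1024, num_tids = 1024, num_pf_pqs = 16 and
 * num_vf_pqs = 8, the PF needs QM_PQ_MEM_4KB(2048) * 16 = 48 pages
 * for PF PQs, QM_PQ_MEM_4KB(1024) * 8 = 16 pages for VF PQs and
 * QM_PQ_MEM_4KB(3072) * QM_OTHER_PQS_PER_PF = 4 * 4 = 16 pages for
 * the other PQs, i.e. 80 pages (320KB) in total.
 */
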
int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u8 pf_id,
		   u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 vport_id,
		      u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq,
			  u16 start_pq,
			  u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
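
/* Worked example of the chunking above (illustrative): stopping
 * num_pqs = 100 PQs starting at start_pq = 0 sends four commands,
 * one per QM_STOP_PQ_MASK_WIDTH = 32 PQs: full 32-bit masks for
 * groups 0-2 and a 4-bit mask for group 3 (PQs 96-99).
 */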