// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#define CDU_VALIDATION_DEFAULT_CFG	CDU_CONTEXT_VALIDATION_DEFAULT_CFG

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
#define QM_INVALID_PQ_ID	0xffff
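
/* Illustrative arithmetic for the macros above (example values, not from a
 * real configuration; assumes QM_PQ_ELEMENT_SIZE is 4 bytes): for
 * pq_size = 2048, QM_PQ_MEM_4KB yields DIV_ROUND_UP(2049 * 4, 0x1000) = 3
 * pages of 4KB, and QM_PQ_SIZE_256B yields DIV_ROUND_UP(2048, 0x100) - 1 = 7
 * (the hardware takes sizes programmed minus one).
 */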
/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED	100000

/* Feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* Initial VOQ byte credit */
#define QM_INITIAL_VOQ_BYTE_CRD	98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* VOQ constants */
#define MAX_NUM_VOQS	(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
#define VOQS_BIT_MASK	(BIT(MAX_NUM_VOQS) - 1)
/* WFQ constants */

/* PF WFQ increment value, 0x9000 = 4*9*1024 */
#define QM_PF_WFQ_INC_VAL(weight)	((weight) * 0x9000)

/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_PF_WFQ_UPPER_BOUND	62500000

/* PF WFQ max increment value, 0.7 * upper bound */
#define QM_PF_WFQ_MAX_INC_VAL	((QM_PF_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
#define QM_PF_WFQ_CRD_E5_NUM_VOQS	16

/* VP WFQ increment value */
#define QM_VP_WFQ_INC_VAL(weight)	((weight) * QM_VP_WFQ_MIN_INC_VAL)

/* VP WFQ min increment value */
#define QM_VP_WFQ_MIN_INC_VAL	10800

/* VP WFQ max increment value, 2^30 */
#define QM_VP_WFQ_MAX_INC_VAL	0x40000000

/* VP WFQ bypass threshold */
#define QM_VP_WFQ_BYPASS_THRESH	(QM_VP_WFQ_MIN_INC_VAL - 100)

/* VP RL credit task cost */
#define QM_VP_RL_CRD_TASK_COST	9700

/* Bit of VOQ in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_VOQ_SHIFT	0

/* Bit of PF in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_PF_SHIFT	5
/* RL constants */

/* Period in us */
#define QM_RL_PERIOD	5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) ({	\
	typeof(rate) __rate = (rate); \
	max_t(u32, \
	      (u32)(((__rate ? __rate : \
		      100000) * \
		     QM_RL_PERIOD * \
		     101) / (8 * 100)), 1); })
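
/* Worked example (illustrative numbers): for rate = 10000 Mbps the
 * increment is (10000 * 5 * 101) / 800 = 6312, i.e. the byte credit
 * granted per 5us period at 10Gbps, with /8 converting bits to bytes
 * and the 101/100 factor adding a ~1% margin.
 */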
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND	62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* QCN RL Upper bound, speed is in Mbps */
#define QM_GLOBAL_RL_UPPER_BOUND(speed)	((u32)max_t( \
		u32, \
		(u32)(((speed) * \
		       QM_RL_PERIOD * 101) / (8 * 100)), \
		QM_VP_RL_CRD_TASK_COST \
		+ 1000))
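
/* Illustrative (using the reconstruction above): at speed = 100000 Mbps the
 * bound is max((100000 * 5 * 101) / 800, 9700 + 1000) = max(63125, 10700) =
 * 63125, so at high link speeds the rate-derived term dominates the
 * per-task cost floor.
 */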
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES	150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
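
/* Example: the 150 pure-LB command lines above yield a line credit of
 * (150 - 4) * 2 = 292, OR'd with the credit register's sign bit.
 */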
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS	38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO	7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH	32
#define QM_STOP_CMD_ADDR	2
#define QM_STOP_CMD_STRUCT_SIZE	2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid,	      \
			  rl_id, ext_voq, wrr)				      \
	do {								      \
		u32 __reg = 0;						      \
									      \
		BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg));	      \
		memset(&(map), 0, sizeof(map));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1);		      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID,			      \
			  !!(rl_valid));				      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id));	      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id));		      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq));		      \
		SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,		      \
			  (wrr));					      \
									      \
		STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id),   \
			     __reg);					      \
		(map).reg = cpu_to_le32(__reg);				      \
	} while (0)
#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	(((rl) >> 8) << 9))

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	 XSTORM_PQ_INFO_OFFSET(pq_id))
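
/* Bit layout packed by PQ_INFO_ELEMENT (per the reconstruction above): VP
 * PQ ID at bit 0, PF at bit 12, TC at bit 16, port at bit 20, RL-valid at
 * bit 22, the low 8 bits of the RL ID at bit 24, and the remaining high RL
 * ID bits folded in starting at bit 9.
 */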
static const char * const s_protocol_types[] = {
	"PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
	"PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
	"PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
	"PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
};
static const char *s_ramrod_cmd_ids[][28] = {
	{
	"ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
	 "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
	 "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
	 "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
	 "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
	 "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
	 "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
	{ "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
	  "FCOE_RAMROD_CMD_ID_STAT_FUNC",
	  "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
	  "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	  "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	  "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	  "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	  "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	  "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	  "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
	  "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
	  "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
	  "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
	  "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
	  "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
	  "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
	  "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
	{ "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
	  "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
	  "CORE_RAMROD_TX_QUEUE_STOP",
	  "CORE_RAMROD_RX_QUEUE_FLUSH",
	  "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
	{ "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
	  "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
	  "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
	  "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
	  "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
	  "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
	  "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
	  "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
	  "ETH_RAMROD_RX_ADD_UDP_FILTER",
	  "ETH_RAMROD_RX_DELETE_UDP_FILTER",
	  "ETH_RAMROD_RX_CREATE_GFT_ACTION",
	  "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
	  "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
	  "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
	  "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
	{ "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
	  "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
	  "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
	  "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
	  "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
	  "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
	  "RDMA_RAMROD_STOP_NS_TRACKING",
	  "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
	  "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
	  "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
	  "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
	  "IWARP_RAMROD_CMD_ID_MODIFY_QP",
	  "IWARP_RAMROD_CMD_ID_DESTROY_QP",
	  "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
	{ NULL }, /*TOE*/
	{ NULL }, /*PREROCE*/
	{ "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
	  "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
	  "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
	  "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
};
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
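
/* Mapping example (illustrative): with 4 physical TCs per port, port 1 /
 * TC 2 lands on VOQ 1 * 4 + 2 = 6, while the pure-LB TC of port 1 maps
 * past all physical VOQs, to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1.
 */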
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_PF_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_VP_WFQ_BYPASS_THRESH);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void
qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
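
/* Worked example of the split above (illustrative numbers): for a port
 * with B = 1000 blocks and C = 2 active TCs, headroom leaves 962 usable
 * blocks; the pure LB share is max(38, ((962 * 10) / (2 * 10 + 7)) / 10) =
 * max(38, 35) = 38 blocks, and each physical TC then gets
 * (962 - 38) / 2 = 462 guaranteed blocks.
 */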
static void
qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetics.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
/* Prepare runtime init values for the specified RL.
 * Set max link speed (100Gbps) per rate limiter.
 * Return -1 on error.
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}
/* Returns the upper bound for the specified Vport RL parameters.
 * link_speed is in Mbps.
 * Returns 0 in case of error.
 */
static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
					u32 link_speed)
{
	switch (vport_rl_type) {
	case QM_RL_TYPE_NORMAL:
		return QM_INITIAL_VOQ_BYTE_CRD;
	case QM_RL_TYPE_QCN:
		return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
	default:
		return 0;
	}
}
/* Prepare VPORT RL runtime init values.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u16 start_rl,
				u16 num_rls,
				u32 link_speed,
				struct init_qm_rl_params *rl_params)
{
	u16 i, rl_id;

	if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
		u32 upper_bound, inc_val;

		upper_bound =
		    qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
						 rl_params[i].vport_rl_type,
						 link_speed);

		inc_val =
		    QM_RL_INC_VAL(rl_params[i].vport_rl ?
				  rl_params[i].vport_rl : link_speed);
		if (inc_val > upper_bound) {
			DP_NOTICE(p_hwfn,
				  "Invalid RL rate - limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
			     inc_val);
	}

	return 0;
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_qm_pf_rt_init_params *p_params,
				 u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id, map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);

	return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, wfq, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			/* Check if VPORT/TC is valid */
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id == QM_INVALID_PQ_ID)
				continue;

			/* Find WFQ weight (per VPORT or per VPORT+TC) */
			wfq = vport_params[i].wfq;
			wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
			inc_val = QM_VP_WFQ_INC_VAL(wfq);
			if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn,
					  "Invalid VPORT WFQ weight configuration\n");
				return -1;
			}

			/* Config registers */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
				     vport_pq_id,
				     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
				     vport_pq_id,
				     inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
				     vport_pq_id, inc_val);
		}
	}

	return 0;
}
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
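
/* Note on the sequence above: the command address and both data halves are
 * latched first, then the GO register is pulsed (written 1 then 0). Ready
 * is polled both before issuing and after, so a false return means the SDM
 * either never became ready or did not complete the command in time.
 */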
/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
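
/* Sizing note (descriptive): the PF memory is the sum of three PQ pools -
 * one 4KB-page quantum per PF PQ sized by num_pf_cids, one per VF PQ sized
 * by num_vf_cids, and QM_OTHER_PQS_PER_PF "other" PQs sized by
 * num_pf_cids + num_tids.
 */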
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
		  p_params->pf_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
		  p_params->vport_wfq_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
		  p_params->pf_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en ? 1 : 0);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	qed_global_rl_rt_init(p_hwfn);

	return 0;
}
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
		return -1;

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
				 p_params->num_rls, p_params->link_speed,
				 p_params->rl_params))
		return -1;

	return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	int result = 0;
	u16 vport_pq_id;
	u8 tc;

	for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
						       vport_pq_id, wfq);
	}

	return result;
}
int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			  u16 first_tx_pq_id, u16 wfq)
{
	u32 inc_val;

	if (first_tx_pq_id == QM_INVALID_PQ_ID)
		return -1;

	inc_val = QM_VP_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
	       (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
	       inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
	       inc_val);

	return 0;
}
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type)
{
	u32 inc_val, upper_bound;

	upper_bound =
	    (vport_rl_type ==
	     QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
	    QM_INITIAL_VOQ_BYTE_CRD;
	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > upper_bound) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn,
	       p_ptt,
	       QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
	       upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
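
/* Chunking example (illustrative): stopping PQs 30..33 spans two 32-bit
 * pause masks, so two commands are sent - one for group 0 covering PQs
 * 30-31 (mask bits 30-31) and one for group 1 covering PQs 32-33 (mask
 * bits 0-1).
 */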
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable)	\
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = offset; \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)

#define PRS_ETH_TUNN_OUTPUT_FORMAT	0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT		0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		u32 i; \
		\
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, \
			       ((addr) + (4 * i)), \
			       ((u32 *)&(arr))[i]); \
	} while (0)
/**
 * qed_dmae_to_grc() - Internal function for writing from host to
 * wide-bus registers (split registers are not supported yet).
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for writing the registers.
 * @p_data: Pointer to source data.
 * @addr: Destination register address.
 * @len_in_dwords: Data length in dwords (u32).
 *
 * Return: Length of the written data in dwords (u32) or -1 on invalid
 *         input.
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   __le32 *p_data, u32 addr, u32 len_in_dwords)
{
	struct qed_dmae_params params = { 0 };
	u32 *data_cpu;
	int rc;

	if (!p_data)
		return -1;

	/* Set DMAE params */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

	/* Execute DMAE command */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)(p_data),
			       addr, len_in_dwords, &params);

	/* If the DMAE write failed, fall back to writing using GRC */
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Failed writing to chip using DMAE, using GRC instead\n");

		/* Swap to CPU byteorder and write to registers using GRC */
		data_cpu = (__force u32 *)p_data;
		le32_to_cpu_array(data_cpu, len_in_dwords);

		ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
		cpu_to_le32_array(data_cpu, len_in_dwords);
	}

	return len_in_dwords;
}
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
		  eth_gre_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
		  ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
		  eth_geneve_enable);
	SET_FIELD(reg_val,
		  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
		  ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
	       ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET	3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT	0xC8DAB910

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID	23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID	25
#define PARSER_ETH_CONN_CM_HDR	0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	struct regpair ram_line = { 0 };

	/* Disable gft search for PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	struct regpair ram_line;
	u32 search_non_ip_as_gft;
	u32 reg_val, cam_line;
	u32 lo = 0, hi = 0;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	ram_line.lo = cpu_to_le32(lo);
	ram_line.hi = cpu_to_le32(hi);

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);

	/* Set default profile so that no filter match will happen */
	ram_line.lo = cpu_to_le32(0xffffffff);
	ram_line.hi = cpu_to_le32(0x3ff);
	qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
			PRS_GFT_CAM_LINES_NO_MATCH,
			sizeof(ram_line) / REG_SIZE);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
DECLARE_CRC8_TABLE(cdu_crc8_table);

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	__be32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
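	/* Packing example (illustrative): for cid = 0x12345678, region = 3
	 * and type = 4, with all CFG_USE_* bits set, the string-to-compress
	 * becomes 0x12300000 | (0x678 << 8) | (3 << 4) | 4 = 0x12367834
	 * before the big-endian conversion and CRC8.
	 */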
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = cpu_to_be32(validation_string);
	crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
		   CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]		= active configuration bit
	 * [6:0]	= crc[6:0]
	 *
	 * for type B validation
	 * [7]		= active configuration bit
	 * [6:3]	= connection_type[3:0]
	 * [2:0]	= crc[2:0]
	 */
	validation_byte |=
	    ((validation_cfg >>
	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
/* Calculate and set validation bytes for session context */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
const char *qed_get_protocol_type_str(u32 protocol_type)
{
	if (protocol_type >= ARRAY_SIZE(s_protocol_types))
		return "Invalid protocol type";

	return s_protocol_types[protocol_type];
}

const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
{
	const char *ramrod_cmd_id_str;

	if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
		return "Invalid protocol type";

	if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
		return "Invalid Ramrod command ID";

	ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];

	if (!ramrod_cmd_id_str)
		return "Invalid Ramrod command ID";

	return ramrod_cmd_id_str;
}
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default:
		return 0;
	}
}
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}
#define PHYS_ADDR_DWORDS	DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS	(sizeof(struct fw_overlay_buf_hdr) / 4)

static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    TSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    MSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    USTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    XSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    YSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		    PSTORM_OVERLAY_BUF_ADDR_OFFSET;

	default:
		return 0;
	}
}
struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
					       const u32 * const
					       fw_overlay_in_buf,
					       u32 buf_size_in_bytes)
{
	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
	struct phys_mem_desc *allocated_mem;

	if (!buf_size)
		return NULL;

	allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!allocated_mem)
		return NULL;

	/* For each Storm, set physical address in RAM */
	while (buf_offset < buf_size) {
		struct phys_mem_desc *storm_mem_desc;
		struct fw_overlay_buf_hdr *hdr;
		u32 storm_buf_size;
		u8 storm_id;

		hdr =
		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
		storm_buf_size = GET_FIELD(hdr->data,
					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
		if (storm_id >= NUM_STORMS)
			break;
		storm_mem_desc = allocated_mem + storm_id;
		storm_mem_desc->size = storm_buf_size * sizeof(u32);

		/* Allocate physical memory for Storm's overlays buffer */
		storm_mem_desc->virt_addr =
		    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       storm_mem_desc->size,
				       &storm_mem_desc->phys_addr, GFP_KERNEL);
		if (!storm_mem_desc->virt_addr)
			break;

		/* Skip overlays buffer header */
		buf_offset += OVERLAY_HDR_SIZE_DWORDS;

		/* Copy Storm's overlays buffer to allocated memory */
		memcpy(storm_mem_desc->virt_addr,
		       &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

		/* Advance to next Storm */
		buf_offset += storm_buf_size;
	}

	/* If memory allocation has failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
		return NULL;
	}

	return allocated_mem;
}
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		u32 ram_addr, i;

		/* Skip Storms with no FW overlays */
		if (!storm_mem_desc->virt_addr)
			continue;

		/* Calculate overlay RAM GRC address of current PF */
		ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

		/* Write Storm's overlay physical address to RAM */
		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
			qed_wr(p_hwfn, p_ptt, ram_addr,
			       ((u32 *)&storm_mem_desc->phys_addr)[i]);
	}
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc **fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem || !(*fw_overlay_mem))
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;

		/* Free Storm's physical memory */
		if (storm_mem_desc->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  storm_mem_desc->size,
					  storm_mem_desc->virt_addr,
					  storm_mem_desc->phys_addr);
	}

	/* Free allocated virtual memory */
	kfree(*fw_overlay_mem);
	*fw_overlay_mem = NULL;
}