/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#define CDU_VALIDATION_DEFAULT_CFG	61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
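
/* Illustrative sizing example (not part of the driver logic), assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes as defined in qed_hsi.h: a PQ with 2000
 * CIDs needs QM_PQ_MEM_4KB(2000) = DIV_ROUND_UP(2001 * 4, 0x1000) = 2
 * pages of 4KB, and QM_PQ_SIZE_256B(2000) = DIV_ROUND_UP(2000, 0x100) - 1
 * = 7.
 */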
/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED		100000
/* Feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND		62500000
/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL		((QM_WFQ_UPPER_BOUND * 7) / 10)
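
/* Illustrative example (not part of the driver logic): a WFQ weight of 1
 * yields an increment of 0x9000 (36864); since QM_WFQ_MAX_INC_VAL is
 * 43750000, the largest weight that passes validation is
 * 43750000 / 36864 = 1186.
 */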
/* RL constants */

/* Period in us */
#define QM_RL_PERIOD			5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) ({	\
	typeof(rate) __rate = (rate); \
	max_t(u32,		\
	      (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
		    (8 * 100)),	\
	      1); })
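
/* Worked example (illustrative only): for a 10000 Mbps rate limit,
 * QM_RL_INC_VAL(10000) = (10000 * 5 * 101) / 800 = 6312 credit units per
 * 5 us period. The 101/100 factor adds a 1% margin, and a rate of 0 is
 * treated as 1000000 Mbps.
 */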
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND		62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL		((QM_PF_RL_UPPER_BOUND * 7) / 10)
/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed)	((u32)max_t(u32, \
						    QM_RL_INC_VAL(speed), \
						    9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
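
/* Illustrative example (not part of the driver logic): for the 150 pure LB
 * command lines, QM_VOQ_LINE_CRD(150) = (150 - 4) * 2 = 292 line credits,
 * ORed with QM_LINE_CRD_REG_SIGN_BIT per the sign-bit convention these
 * credit registers expect.
 */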
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS		38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO		7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)
#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
			  ext_voq, wrr) \
	do { \
		typeof(map) __map; \
		memset(&__map, 0, sizeof(__map)); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
			  rl_valid ? 1 : 0); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
			  vp_pq_id); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
		SET_FIELD(__map.reg, \
			  QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     *((u32 *)&__map)); \
		(map) = __map; \
	} while (0)
#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	(((rl) >> 8) << 9))

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	XSTORM_PQ_INFO_OFFSET(pq_id)
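
/* For reference, the PQ info element packs its fields as follows (derived
 * from the shifts above): vport ID at bit 0, PF ID at bit 12, TC at bit 16,
 * port at bit 20, the RL-valid flag at bit 22, the low 8 bits of the RL ID
 * at bit 24, and the remaining high RL ID bits starting at bit 9.
 */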
/******************** INTERNAL IMPLEMENTATION *********************/
/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
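
/* Illustrative mapping (not part of the driver logic): with 4 physical TCs
 * per port, port 1 / TC 2 maps to external VOQ 1 * 4 + 2 = 6, while the
 * pure LB TC of port 1 maps past all physical VOQs, to
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1.
 */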
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}
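
/* Worked example (illustrative only): a port reporting 3400 PBF command
 * lines with two active physical TCs first reserves the 150 pure LB lines,
 * leaving 3250 lines, i.e. 1625 lines per active TC; inactive VOQs keep the
 * zero line count written by the clear loop above.
 */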
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetics.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
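
/* Worked example (illustrative only): a port with 1000 BTB blocks and 4
 * active TCs has 1000 - 38 = 962 usable blocks after headroom. The factored
 * pure LB share is (962 * 10) / (4 * 10 + 7) = 204, i.e. 20 blocks after
 * removing the factor, which is lifted to the 38-block jumbo minimum. Each
 * physical TC then gets (962 - 38) / 4 = 231 guaranteed blocks.
 */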
/* Prepare runtime init values for the specified RL.
 * Set max link speed (100Gbps) per rate limiter.
 * Return -1 on error.
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_qm_pf_rt_init_params *p_params,
				  u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map_e4 tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id, map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  E4,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	qed_global_rl_rt_init(p_hwfn);

	return 0;
}
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	/* A VPORT can have several VPORT PQ IDs for various TCs */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn,
			       p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
	}

	return 0;
}
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
		DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
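
/* Usage sketch (illustrative, based on the signature above): a caller that
 * wants to pause a PF's Tx PQs and later release them would issue
 *
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs);
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
 *
 * where the first bool selects release vs. stop and the second selects
 * Tx vs. Other PQs.
 */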
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = offset; \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)

#define PRS_ETH_TUNN_OUTPUT_FORMAT	0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT		0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		u32 i; \
		\
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, \
			       ((addr) + (4 * i)), \
			       ((u32 *)&(arr))[i]); \
	} while (0)
/**
 * @brief qed_dmae_to_grc - is an internal function - writes from host to
 * wide-bus registers (split registers are not supported yet)
 *
 * @param p_hwfn - HW device data
 * @param p_ptt - ptt window used for writing the registers.
 * @param p_data - pointer to source data.
 * @param addr - Destination register address.
 * @param len_in_dwords - data length in DWORDS (u32)
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_data, u32 addr, u32 len_in_dwords)
{
	struct qed_dmae_params params = {};
	int rc;

	if (!p_data)
		return -1;

	/* Set DMAE params */
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

	/* Execute DMAE command */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
			       (u64)(uintptr_t)(p_data),
			       addr, len_in_dwords, &params);

	/* If not read using DMAE, read using GRC */
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Failed writing to chip using DMAE, using GRC instead\n");

		/* write to registers using GRC */
		ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
	}

	return len_in_dwords;
}
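
/* Design note (derived from the code above): callers treat the returned
 * DWORD count as a success indication, so the GRC fallback keeps the return
 * value identical whether or not the DMAE transfer itself succeeded; only a
 * NULL source buffer is reported as an error (-1).
 */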
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
	       ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET	3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT	-925189872

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID		23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID		25
#define PARSER_ETH_CONN_CM_HDR			0
#define CAM_LINE_SIZE				sizeof(u32)
#define RAM_LINE_SIZE				sizeof(u64)
#define REG_SIZE				sizeof(u32)
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	struct regpair ram_line = { };

	/* Disable gft search for PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, search_non_ip_as_gft;
	struct regpair ram_line = { };

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
			sizeof(ram_line) / REG_SIZE);

	/* Set default profile so that no filter match will happen */
	ram_line.lo = 0xffffffff;
	ram_line.hi = 0x3ff;
	qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
			PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
			PRS_GFT_CAM_LINES_NO_MATCH,
			sizeof(ram_line) / REG_SIZE);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
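
/* Usage sketch (illustrative, based on the signature above): enabling an
 * aRFS-style 4-tuple profile for TCP and UDP over both IP versions would
 * look like
 *
 *	qed_gft_config(p_hwfn, p_ptt, pf_id, true, true, true, true,
 *		       GFT_PROFILE_TYPE_4_TUPLE);
 *
 * and qed_gft_disable(p_hwfn, p_ptt, pf_id) undoes it for the same PF.
 */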
DECLARE_CRC8_TABLE(cdu_crc8_table);
/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	u32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8]  = {CID[31:20],CID[11:0]}
	 * [7:4]   = Region
	 * [3:0]   = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = be32_to_cpu(validation_string);
	crc = crc8(cdu_crc8_table,
		   (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]		= active configuration bit
	 * [6:0]	= crc[6:0]
	 *
	 * for type B validation
	 * [7]		= active configuration bit
	 * [6:3]	= connection_type[3:0]
	 * [2:0]	= crc[2:0]
	 */
	validation_byte |=
	    ((validation_cfg >>
	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
/* Calculate and set validation bytes for session context */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
}
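
/* Note on the region arguments above: 3, 4 and 5 match the
 * con_region_offsets rows (region 3/4/5 offsets), which hold the X, T and U
 * validation byte locations for each connection type. The later
 * qed_memset_session_ctx() relies on the same offsets to preserve these
 * bytes across a context wipe.
 */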
/* Calculate and set validation bytes for task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default:
		return 0;
	}
}
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}
#define PHYS_ADDR_DWORDS	DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS	(sizeof(struct fw_overlay_buf_hdr) / 4)
static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_OVERLAY_BUF_ADDR_OFFSET;
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_OVERLAY_BUF_ADDR_OFFSET;

	default:
		return 0;
	}
}
struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
					       const u32 * const fw_overlay_in_buf,
					       u32 buf_size_in_bytes)
{
	u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
	struct phys_mem_desc *allocated_mem;

	if (!buf_size)
		return NULL;

	allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!allocated_mem)
		return NULL;

	memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));

	/* For each Storm, set physical address in RAM */
	while (buf_offset < buf_size) {
		struct phys_mem_desc *storm_mem_desc;
		struct fw_overlay_buf_hdr *hdr;
		u32 storm_buf_size;
		u8 storm_id;

		hdr =
		    (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
		storm_buf_size = GET_FIELD(hdr->data,
					   FW_OVERLAY_BUF_HDR_BUF_SIZE);
		storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
		storm_mem_desc = allocated_mem + storm_id;
		storm_mem_desc->size = storm_buf_size * sizeof(u32);

		/* Allocate physical memory for Storm's overlays buffer */
		storm_mem_desc->virt_addr =
		    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       storm_mem_desc->size,
				       &storm_mem_desc->phys_addr, GFP_KERNEL);
		if (!storm_mem_desc->virt_addr)
			break;

		/* Skip overlays buffer header */
		buf_offset += OVERLAY_HDR_SIZE_DWORDS;

		/* Copy Storm's overlays buffer to allocated memory */
		memcpy(storm_mem_desc->virt_addr,
		       &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

		/* Advance to next Storm */
		buf_offset += storm_buf_size;
	}

	/* If memory allocation has failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
		return NULL;
	}

	return allocated_mem;
}
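
/* For reference, the expected input layout (as parsed above) is a sequence
 * of per-Storm chunks: one fw_overlay_buf_hdr carrying the Storm ID and
 * buffer size in dwords, followed by that many payload dwords. A failed DMA
 * allocation leaves buf_offset short of buf_size, which triggers the
 * cleanup path.
 */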
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		u32 ram_addr, i;

		/* Skip Storms with no FW overlays */
		if (!storm_mem_desc->virt_addr)
			continue;

		/* Calculate overlay RAM GRC address of current PF */
		ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
			   sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

		/* Write Storm's overlay physical address to RAM */
		for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
			qed_wr(p_hwfn, p_ptt, ram_addr,
			       ((u32 *)&storm_mem_desc->phys_addr)[i]);
	}
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc *fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem)
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;

		/* Free Storm's physical memory */
		if (storm_mem_desc->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  storm_mem_desc->size,
					  storm_mem_desc->virt_addr,
					  storm_mem_desc->phys_addr);
	}

	/* Free allocated virtual memory */
	kfree(fw_overlay_mem);
}