// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
21 #define QED_INIT_MAX_POLL_COUNT 100
22 #define QED_INIT_POLL_PERIOD_US 500
24 static u32 pxp_global_win
[] = {
27 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
28 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
29 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
30 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
31 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
32 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
33 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
34 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
35 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
36 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
37 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
38 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
39 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
47 static const u32 iro_arr
[] = {
48 0x00000000, 0x00000000, 0x00080000,
49 0x00003288, 0x00000088, 0x00880000,
50 0x000058e8, 0x00000020, 0x00200000,
51 0x00000b00, 0x00000008, 0x00040000,
52 0x00000a80, 0x00000008, 0x00040000,
53 0x00000000, 0x00000008, 0x00020000,
54 0x00000080, 0x00000008, 0x00040000,
55 0x00000084, 0x00000008, 0x00020000,
56 0x00005718, 0x00000004, 0x00040000,
57 0x00004dd0, 0x00000000, 0x00780000,
58 0x00003e40, 0x00000000, 0x00780000,
59 0x00004480, 0x00000000, 0x00780000,
60 0x00003210, 0x00000000, 0x00780000,
61 0x00003b50, 0x00000000, 0x00780000,
62 0x00007f58, 0x00000000, 0x00780000,
63 0x00005f58, 0x00000000, 0x00080000,
64 0x00007100, 0x00000000, 0x00080000,
65 0x0000aea0, 0x00000000, 0x00080000,
66 0x00004398, 0x00000000, 0x00080000,
67 0x0000a5a0, 0x00000000, 0x00080000,
68 0x0000bde8, 0x00000000, 0x00080000,
69 0x00000020, 0x00000004, 0x00040000,
70 0x000056c8, 0x00000010, 0x00100000,
71 0x0000c210, 0x00000030, 0x00300000,
72 0x0000b088, 0x00000038, 0x00380000,
73 0x00003d20, 0x00000080, 0x00400000,
74 0x0000bf60, 0x00000000, 0x00040000,
75 0x00004560, 0x00040080, 0x00040000,
76 0x000001f8, 0x00000004, 0x00040000,
77 0x00003d60, 0x00000080, 0x00200000,
78 0x00008960, 0x00000040, 0x00300000,
79 0x0000e840, 0x00000060, 0x00600000,
80 0x00004618, 0x00000080, 0x00380000,
81 0x00010738, 0x000000c0, 0x00c00000,
82 0x000001f8, 0x00000002, 0x00020000,
83 0x0000a2a0, 0x00000000, 0x01080000,
84 0x0000a3a8, 0x00000008, 0x00080000,
85 0x000001c0, 0x00000008, 0x00080000,
86 0x000001f8, 0x00000008, 0x00080000,
87 0x00000ac0, 0x00000008, 0x00080000,
88 0x00002578, 0x00000008, 0x00080000,
89 0x000024f8, 0x00000008, 0x00080000,
90 0x00000280, 0x00000008, 0x00080000,
91 0x00000680, 0x00080018, 0x00080000,
92 0x00000b78, 0x00080018, 0x00020000,
93 0x0000c640, 0x00000050, 0x003c0000,
94 0x00012038, 0x00000018, 0x00100000,
95 0x00011b00, 0x00000040, 0x00180000,
96 0x000095d0, 0x00000050, 0x00200000,
97 0x00008b10, 0x00000040, 0x00280000,
98 0x00011640, 0x00000018, 0x00100000,
99 0x0000c828, 0x00000048, 0x00380000,
100 0x00011710, 0x00000020, 0x00200000,
101 0x00004650, 0x00000080, 0x00100000,
102 0x00003618, 0x00000010, 0x00100000,
103 0x0000a968, 0x00000008, 0x00010000,
104 0x000097a0, 0x00000008, 0x00010000,
105 0x00011990, 0x00000008, 0x00010000,
106 0x0000f018, 0x00000008, 0x00010000,
107 0x00012628, 0x00000008, 0x00010000,
108 0x00011da8, 0x00000008, 0x00010000,
109 0x0000aa78, 0x00000030, 0x00100000,
110 0x0000d768, 0x00000028, 0x00280000,
111 0x00009a58, 0x00000018, 0x00180000,
112 0x00009bd8, 0x00000008, 0x00080000,
113 0x00013a18, 0x00000008, 0x00080000,
114 0x000126e8, 0x00000018, 0x00180000,
115 0x0000e608, 0x00500288, 0x00100000,
116 0x00012970, 0x00000138, 0x00280000,
119 void qed_init_iro_array(struct qed_dev
*cdev
)
121 cdev
->iro_arr
= iro_arr
;
124 void qed_init_store_rt_reg(struct qed_hwfn
*p_hwfn
, u32 rt_offset
, u32 val
)
126 p_hwfn
->rt_data
.init_val
[rt_offset
] = val
;
127 p_hwfn
->rt_data
.b_valid
[rt_offset
] = true;
130 void qed_init_store_rt_agg(struct qed_hwfn
*p_hwfn
,
131 u32 rt_offset
, u32
*p_val
, size_t size
)
135 for (i
= 0; i
< size
/ sizeof(u32
); i
++) {
136 p_hwfn
->rt_data
.init_val
[rt_offset
+ i
] = p_val
[i
];
137 p_hwfn
->rt_data
.b_valid
[rt_offset
+ i
] = true;
141 static int qed_init_rt(struct qed_hwfn
*p_hwfn
,
142 struct qed_ptt
*p_ptt
,
143 u32 addr
, u16 rt_offset
, u16 size
, bool b_must_dmae
)
145 u32
*p_init_val
= &p_hwfn
->rt_data
.init_val
[rt_offset
];
146 bool *p_valid
= &p_hwfn
->rt_data
.b_valid
[rt_offset
];
150 /* Since not all RT entries are initialized, go over the RT and
151 * for each segment of initialized values use DMA.
153 for (i
= 0; i
< size
; i
++) {
157 /* In case there isn't any wide-bus configuration here,
158 * simply write the data instead of using dmae.
161 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), p_init_val
[i
]);
166 /* Start of a new segment */
167 for (segment
= 1; i
+ segment
< size
; segment
++)
168 if (!p_valid
[i
+ segment
])
171 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
172 (uintptr_t)(p_init_val
+ i
),
173 addr
+ (i
<< 2), segment
, NULL
);
177 /* invalidate after writing */
178 for (j
= i
; j
< i
+ segment
; j
++)
181 /* Jump over the entire segment, including invalid entry */
188 int qed_init_alloc(struct qed_hwfn
*p_hwfn
)
190 struct qed_rt_data
*rt_data
= &p_hwfn
->rt_data
;
192 if (IS_VF(p_hwfn
->cdev
))
195 rt_data
->b_valid
= kcalloc(RUNTIME_ARRAY_SIZE
, sizeof(bool),
197 if (!rt_data
->b_valid
)
200 rt_data
->init_val
= kcalloc(RUNTIME_ARRAY_SIZE
, sizeof(u32
),
202 if (!rt_data
->init_val
) {
203 kfree(rt_data
->b_valid
);
204 rt_data
->b_valid
= NULL
;
211 void qed_init_free(struct qed_hwfn
*p_hwfn
)
213 kfree(p_hwfn
->rt_data
.init_val
);
214 p_hwfn
->rt_data
.init_val
= NULL
;
215 kfree(p_hwfn
->rt_data
.b_valid
);
216 p_hwfn
->rt_data
.b_valid
= NULL
;
219 static int qed_init_array_dmae(struct qed_hwfn
*p_hwfn
,
220 struct qed_ptt
*p_ptt
,
222 u32 dmae_data_offset
,
230 /* Perform DMAE only for lengthy enough sections or for wide-bus */
231 if (!b_can_dmae
|| (!b_must_dmae
&& (size
< 16))) {
232 const u32
*data
= buf
+ dmae_data_offset
;
235 for (i
= 0; i
< size
; i
++)
236 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), data
[i
]);
238 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
239 (uintptr_t)(buf
+ dmae_data_offset
),
246 static int qed_init_fill_dmae(struct qed_hwfn
*p_hwfn
,
247 struct qed_ptt
*p_ptt
,
248 u32 addr
, u32 fill
, u32 fill_count
)
250 static u32 zero_buffer
[DMAE_MAX_RW_SIZE
];
251 struct qed_dmae_params params
= {};
253 memset(zero_buffer
, 0, sizeof(u32
) * DMAE_MAX_RW_SIZE
);
255 /* invoke the DMAE virtual/physical buffer API with
256 * 1. DMAE init channel
258 * 3. p_hwfb->temp_data,
261 SET_FIELD(params
.flags
, QED_DMAE_PARAMS_RW_REPL_SRC
, 0x1);
262 return qed_dmae_host2grc(p_hwfn
, p_ptt
,
263 (uintptr_t)(&zero_buffer
[0]),
264 addr
, fill_count
, ¶ms
);
267 static void qed_init_fill(struct qed_hwfn
*p_hwfn
,
268 struct qed_ptt
*p_ptt
,
269 u32 addr
, u32 fill
, u32 fill_count
)
273 for (i
= 0; i
< fill_count
; i
++, addr
+= sizeof(u32
))
274 qed_wr(p_hwfn
, p_ptt
, addr
, fill
);
277 static int qed_init_cmd_array(struct qed_hwfn
*p_hwfn
,
278 struct qed_ptt
*p_ptt
,
279 struct init_write_op
*cmd
,
280 bool b_must_dmae
, bool b_can_dmae
)
282 u32 dmae_array_offset
= le32_to_cpu(cmd
->args
.array_offset
);
283 u32 data
= le32_to_cpu(cmd
->data
);
284 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
286 u32 offset
, output_len
, input_len
, max_size
;
287 struct qed_dev
*cdev
= p_hwfn
->cdev
;
288 union init_array_hdr
*hdr
;
289 const u32
*array_data
;
293 array_data
= cdev
->fw_data
->arr_data
;
295 hdr
= (union init_array_hdr
*)(array_data
+ dmae_array_offset
);
296 data
= le32_to_cpu(hdr
->raw
.data
);
297 switch (GET_FIELD(data
, INIT_ARRAY_RAW_HDR_TYPE
)) {
298 case INIT_ARR_ZIPPED
:
299 offset
= dmae_array_offset
+ 1;
300 input_len
= GET_FIELD(data
,
301 INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE
);
302 max_size
= MAX_ZIPPED_SIZE
* 4;
303 memset(p_hwfn
->unzip_buf
, 0, max_size
);
305 output_len
= qed_unzip_data(p_hwfn
, input_len
,
306 (u8
*)&array_data
[offset
],
307 max_size
, (u8
*)p_hwfn
->unzip_buf
);
309 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
, 0,
312 b_must_dmae
, b_can_dmae
);
314 DP_NOTICE(p_hwfn
, "Failed to unzip dmae data\n");
318 case INIT_ARR_PATTERN
:
320 u32 repeats
= GET_FIELD(data
,
321 INIT_ARRAY_PATTERN_HDR_REPETITIONS
);
324 size
= GET_FIELD(data
, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE
);
326 for (i
= 0; i
< repeats
; i
++, addr
+= size
<< 2) {
327 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
328 dmae_array_offset
+ 1,
330 b_must_dmae
, b_can_dmae
);
336 case INIT_ARR_STANDARD
:
337 size
= GET_FIELD(data
, INIT_ARRAY_STANDARD_HDR_SIZE
);
338 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
339 dmae_array_offset
+ 1,
341 b_must_dmae
, b_can_dmae
);
348 /* init_ops write command */
349 static int qed_init_cmd_wr(struct qed_hwfn
*p_hwfn
,
350 struct qed_ptt
*p_ptt
,
351 struct init_write_op
*p_cmd
, bool b_can_dmae
)
353 u32 data
= le32_to_cpu(p_cmd
->data
);
354 bool b_must_dmae
= GET_FIELD(data
, INIT_WRITE_OP_WIDE_BUS
);
355 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
356 union init_write_args
*arg
= &p_cmd
->args
;
360 if (b_must_dmae
&& !b_can_dmae
) {
362 "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
367 switch (GET_FIELD(data
, INIT_WRITE_OP_SOURCE
)) {
368 case INIT_SRC_INLINE
:
369 data
= le32_to_cpu(p_cmd
->args
.inline_val
);
370 qed_wr(p_hwfn
, p_ptt
, addr
, data
);
373 data
= le32_to_cpu(p_cmd
->args
.zeros_count
);
374 if (b_must_dmae
|| (b_can_dmae
&& (data
>= 64)))
375 rc
= qed_init_fill_dmae(p_hwfn
, p_ptt
, addr
, 0, data
);
377 qed_init_fill(p_hwfn
, p_ptt
, addr
, 0, data
);
380 rc
= qed_init_cmd_array(p_hwfn
, p_ptt
, p_cmd
,
381 b_must_dmae
, b_can_dmae
);
383 case INIT_SRC_RUNTIME
:
384 qed_init_rt(p_hwfn
, p_ptt
, addr
,
385 le16_to_cpu(arg
->runtime
.offset
),
386 le16_to_cpu(arg
->runtime
.size
),
394 static inline bool comp_eq(u32 val
, u32 expected_val
)
396 return val
== expected_val
;
399 static inline bool comp_and(u32 val
, u32 expected_val
)
401 return (val
& expected_val
) == expected_val
;
404 static inline bool comp_or(u32 val
, u32 expected_val
)
406 return (val
| expected_val
) > 0;
409 /* init_ops read/poll commands */
410 static void qed_init_cmd_rd(struct qed_hwfn
*p_hwfn
,
411 struct qed_ptt
*p_ptt
, struct init_read_op
*cmd
)
413 bool (*comp_check
)(u32 val
, u32 expected_val
);
414 u32 delay
= QED_INIT_POLL_PERIOD_US
, val
;
415 u32 data
, addr
, poll
;
418 data
= le32_to_cpu(cmd
->op_data
);
419 addr
= GET_FIELD(data
, INIT_READ_OP_ADDRESS
) << 2;
420 poll
= GET_FIELD(data
, INIT_READ_OP_POLL_TYPE
);
423 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
425 if (poll
== INIT_POLL_NONE
)
430 comp_check
= comp_eq
;
433 comp_check
= comp_or
;
436 comp_check
= comp_and
;
439 DP_ERR(p_hwfn
, "Invalid poll comparison type %08x\n",
444 data
= le32_to_cpu(cmd
->expected_val
);
446 i
< QED_INIT_MAX_POLL_COUNT
&& !comp_check(val
, data
);
449 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
452 if (i
== QED_INIT_MAX_POLL_COUNT
) {
454 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
455 addr
, le32_to_cpu(cmd
->expected_val
),
456 val
, le32_to_cpu(cmd
->op_data
));
460 /* init_ops callbacks entry point */
461 static int qed_init_cmd_cb(struct qed_hwfn
*p_hwfn
,
462 struct qed_ptt
*p_ptt
,
463 struct init_callback_op
*p_cmd
)
467 switch (p_cmd
->callback_id
) {
469 rc
= qed_dmae_sanity(p_hwfn
, p_ptt
, "engine_phase");
472 DP_NOTICE(p_hwfn
, "Unexpected init op callback ID %d\n",
480 static u8
qed_init_cmd_mode_match(struct qed_hwfn
*p_hwfn
,
481 u16
*p_offset
, int modes
)
483 struct qed_dev
*cdev
= p_hwfn
->cdev
;
484 const u8
*modes_tree_buf
;
485 u8 arg1
, arg2
, tree_val
;
487 modes_tree_buf
= cdev
->fw_data
->modes_tree_buf
;
488 tree_val
= modes_tree_buf
[(*p_offset
)++];
490 case INIT_MODE_OP_NOT
:
491 return qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
) ^ 1;
492 case INIT_MODE_OP_OR
:
493 arg1
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
494 arg2
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
496 case INIT_MODE_OP_AND
:
497 arg1
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
498 arg2
= qed_init_cmd_mode_match(p_hwfn
, p_offset
, modes
);
501 tree_val
-= MAX_INIT_MODE_OPS
;
502 return (modes
& BIT(tree_val
)) ? 1 : 0;
506 static u32
qed_init_cmd_mode(struct qed_hwfn
*p_hwfn
,
507 struct init_if_mode_op
*p_cmd
, int modes
)
509 u16 offset
= le16_to_cpu(p_cmd
->modes_buf_offset
);
511 if (qed_init_cmd_mode_match(p_hwfn
, &offset
, modes
))
514 return GET_FIELD(le32_to_cpu(p_cmd
->op_data
),
515 INIT_IF_MODE_OP_CMD_OFFSET
);
518 static u32
qed_init_cmd_phase(struct qed_hwfn
*p_hwfn
,
519 struct init_if_phase_op
*p_cmd
,
520 u32 phase
, u32 phase_id
)
522 u32 data
= le32_to_cpu(p_cmd
->phase_data
);
523 u32 op_data
= le32_to_cpu(p_cmd
->op_data
);
525 if (!(GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE
) == phase
&&
526 (GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == ANY_PHASE_ID
||
527 GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == phase_id
)))
528 return GET_FIELD(op_data
, INIT_IF_PHASE_OP_CMD_OFFSET
);
533 int qed_init_run(struct qed_hwfn
*p_hwfn
,
534 struct qed_ptt
*p_ptt
, int phase
, int phase_id
, int modes
)
536 bool b_dmae
= (phase
!= PHASE_ENGINE
);
537 struct qed_dev
*cdev
= p_hwfn
->cdev
;
538 u32 cmd_num
, num_init_ops
;
539 union init_op
*init_ops
;
542 num_init_ops
= cdev
->fw_data
->init_ops_size
;
543 init_ops
= cdev
->fw_data
->init_ops
;
545 p_hwfn
->unzip_buf
= kzalloc(MAX_ZIPPED_SIZE
* 4, GFP_ATOMIC
);
546 if (!p_hwfn
->unzip_buf
)
549 for (cmd_num
= 0; cmd_num
< num_init_ops
; cmd_num
++) {
550 union init_op
*cmd
= &init_ops
[cmd_num
];
551 u32 data
= le32_to_cpu(cmd
->raw
.op_data
);
553 switch (GET_FIELD(data
, INIT_CALLBACK_OP_OP
)) {
555 rc
= qed_init_cmd_wr(p_hwfn
, p_ptt
, &cmd
->write
,
559 qed_init_cmd_rd(p_hwfn
, p_ptt
, &cmd
->read
);
561 case INIT_OP_IF_MODE
:
562 cmd_num
+= qed_init_cmd_mode(p_hwfn
, &cmd
->if_mode
,
565 case INIT_OP_IF_PHASE
:
566 cmd_num
+= qed_init_cmd_phase(p_hwfn
, &cmd
->if_phase
,
570 /* qed_init_run is always invoked from
573 udelay(le32_to_cpu(cmd
->delay
.delay
));
576 case INIT_OP_CALLBACK
:
577 rc
= qed_init_cmd_cb(p_hwfn
, p_ptt
, &cmd
->callback
);
578 if (phase
== PHASE_ENGINE
&&
579 cmd
->callback
.callback_id
== DMAE_READY_CB
)
588 kfree(p_hwfn
->unzip_buf
);
589 p_hwfn
->unzip_buf
= NULL
;
593 void qed_gtt_init(struct qed_hwfn
*p_hwfn
)
598 /* Set the global windows */
599 gtt_base
= PXP_PF_WINDOW_ADMIN_START
+ PXP_PF_WINDOW_ADMIN_GLOBAL_START
;
601 for (i
= 0; i
< ARRAY_SIZE(pxp_global_win
); i
++)
602 if (pxp_global_win
[i
])
603 REG_WR(p_hwfn
, gtt_base
+ i
* PXP_GLOBAL_ENTRY_SIZE
,
607 int qed_init_fw_data(struct qed_dev
*cdev
, const u8
*data
)
609 struct qed_fw_data
*fw
= cdev
->fw_data
;
610 struct bin_buffer_hdr
*buf_hdr
;
614 DP_NOTICE(cdev
, "Invalid fw data\n");
618 /* First Dword contains metadata and should be skipped */
619 buf_hdr
= (struct bin_buffer_hdr
*)data
;
621 offset
= buf_hdr
[BIN_BUF_INIT_FW_VER_INFO
].offset
;
622 fw
->fw_ver_info
= (struct fw_ver_info
*)(data
+ offset
);
624 offset
= buf_hdr
[BIN_BUF_INIT_CMD
].offset
;
625 fw
->init_ops
= (union init_op
*)(data
+ offset
);
627 offset
= buf_hdr
[BIN_BUF_INIT_VAL
].offset
;
628 fw
->arr_data
= (u32
*)(data
+ offset
);
630 offset
= buf_hdr
[BIN_BUF_INIT_MODE_TREE
].offset
;
631 fw
->modes_tree_buf
= (u8
*)(data
+ offset
);
632 len
= buf_hdr
[BIN_BUF_INIT_CMD
].length
;
633 fw
->init_ops_size
= len
/ sizeof(struct init_raw_op
);
635 offset
= buf_hdr
[BIN_BUF_INIT_OVERLAYS
].offset
;
636 fw
->fw_overlays
= (u32
*)(data
+ offset
);
637 len
= buf_hdr
[BIN_BUF_INIT_OVERLAYS
].length
;
638 fw
->fw_overlays_len
= len
;