/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500

static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};

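/* Hook up the firmware-generated IRO (internal RAM offsets) table. */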
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

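/* Stage a single runtime register value; qed_init_rt() below flushes all
 * valid staged entries to the chip when the relevant init phase runs.
 */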
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset,
			   u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val,
			   size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

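/* Flush the staged runtime values in [rt_offset, rt_offset + size) to HW.
 * Runs of consecutive valid entries are coalesced so that each run can be
 * written in a single DMAE transaction when DMAE is required.
 */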
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr,
		       u16 rt_offset,
		       u16 size,
		       bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2),
			       p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

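/* Allocate the shadow arrays that back the runtime registers. */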
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		return -ENOMEM;
	}

	return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	kfree(p_hwfn->rt_data.b_valid);
}

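/* Write a buffer of init values to consecutive registers, either via DMAE
 * or with individual register writes, per the heuristic below.
 */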
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	}

	return rc;
}

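/* Zero-fill a region of chip memory with a single DMAE transaction that
 * replicates one zeroed source buffer (QED_DMAE_FLAG_RW_REPL_SRC).
 */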
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr,
			      u32 fill,
			      u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. p_hwfn->temp_data,
	 * 4. fill_count
	 */

	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count,
				 QED_DMAE_FLAG_RW_REPL_SRC);
}

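/* Fallback fill path: write the fill value one register at a time. */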
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr,
			  u32 fill,
			  u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

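/* Handle a write command whose source is an array in the firmware file.
 * Arrays come in three encodings: zipped (decompressed into unzip_buf
 * first), pattern (one block repeated N times) and standard.
 */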
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae,
			      bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data +
				       dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *cmd,
			   bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	union init_write_args *arg = &cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		qed_wr(p_hwfn, p_ptt, addr,
		       le32_to_cpu(arg->inline_val));
		break;
	case INIT_SRC_ZEROS:
		if (b_must_dmae ||
		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
						le32_to_cpu(arg->zeros_count));
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0,
				      le32_to_cpu(arg->zeros_count));
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}

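/* Comparators used by the poll commands below. */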
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}

static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *offset,
				  int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

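/* Return the number of init commands to skip when the mode condition does
 * not match, or 0 if the following block should be executed.
 */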
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd,
			     int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

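/* Same skip-count logic as above, keyed on the init phase and phase id. */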
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase,
			      u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

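/* init_ops interpreter entry point: walk the firmware command stream and
 * dispatch each opcode for the given phase/phase_id/modes combination.
 */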
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt,
		 int phase,
		 int phase_id,
		 int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
		return -ENOMEM;
	}

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}

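/* Program the PXP global address windows; entries left at zero are skipped. */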
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

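/* Parse the firmware image's binary buffer headers and point fw_data at
 * the init commands, init values and modes tree inside it.
 */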
int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}