/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
9 #include <linux/types.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
19 #include "qed_init_ops.h"
20 #include "qed_reg_addr.h"
22 #define QED_INIT_MAX_POLL_COUNT 100
23 #define QED_INIT_POLL_PERIOD_US 500
25 static u32 pxp_global_win
[] = {
28 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
29 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
30 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
31 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
32 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
33 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
34 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
35 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
36 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
37 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
47 void qed_init_iro_array(struct qed_dev
*cdev
)
49 cdev
->iro_arr
= iro_arr
;
52 /* Runtime configuration helpers */
53 void qed_init_clear_rt_data(struct qed_hwfn
*p_hwfn
)
57 for (i
= 0; i
< RUNTIME_ARRAY_SIZE
; i
++)
58 p_hwfn
->rt_data
[i
].b_valid
= false;
61 void qed_init_store_rt_reg(struct qed_hwfn
*p_hwfn
,
65 p_hwfn
->rt_data
[rt_offset
].init_val
= val
;
66 p_hwfn
->rt_data
[rt_offset
].b_valid
= true;
69 void qed_init_store_rt_agg(struct qed_hwfn
*p_hwfn
,
76 for (i
= 0; i
< size
/ sizeof(u32
); i
++) {
77 p_hwfn
->rt_data
[rt_offset
+ i
].init_val
= val
[i
];
78 p_hwfn
->rt_data
[rt_offset
+ i
].b_valid
= true;
82 static void qed_init_rt(struct qed_hwfn
*p_hwfn
,
83 struct qed_ptt
*p_ptt
,
88 struct qed_rt_data
*rt_data
= p_hwfn
->rt_data
+ rt_offset
;
91 for (i
= 0; i
< size
; i
++) {
92 if (!rt_data
[i
].b_valid
)
94 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), rt_data
[i
].init_val
);
98 int qed_init_alloc(struct qed_hwfn
*p_hwfn
)
100 struct qed_rt_data
*rt_data
;
102 rt_data
= kzalloc(sizeof(*rt_data
) * RUNTIME_ARRAY_SIZE
, GFP_ATOMIC
);
106 p_hwfn
->rt_data
= rt_data
;
111 void qed_init_free(struct qed_hwfn
*p_hwfn
)
113 kfree(p_hwfn
->rt_data
);
114 p_hwfn
->rt_data
= NULL
;
117 static int qed_init_array_dmae(struct qed_hwfn
*p_hwfn
,
118 struct qed_ptt
*p_ptt
,
120 u32 dmae_data_offset
,
128 /* Perform DMAE only for lengthy enough sections or for wide-bus */
129 if (!b_can_dmae
|| (!b_must_dmae
&& (size
< 16))) {
130 const u32
*data
= buf
+ dmae_data_offset
;
133 for (i
= 0; i
< size
; i
++)
134 qed_wr(p_hwfn
, p_ptt
, addr
+ (i
<< 2), data
[i
]);
136 rc
= qed_dmae_host2grc(p_hwfn
, p_ptt
,
137 (uintptr_t)(buf
+ dmae_data_offset
),
144 static int qed_init_fill_dmae(struct qed_hwfn
*p_hwfn
,
145 struct qed_ptt
*p_ptt
,
150 static u32 zero_buffer
[DMAE_MAX_RW_SIZE
];
152 memset(zero_buffer
, 0, sizeof(u32
) * DMAE_MAX_RW_SIZE
);
154 /* invoke the DMAE virtual/physical buffer API with
155 * 1. DMAE init channel
157 * 3. p_hwfb->temp_data,
161 return qed_dmae_host2grc(p_hwfn
, p_ptt
,
162 (uintptr_t)(&zero_buffer
[0]),
164 QED_DMAE_FLAG_RW_REPL_SRC
);
167 static void qed_init_fill(struct qed_hwfn
*p_hwfn
,
168 struct qed_ptt
*p_ptt
,
175 for (i
= 0; i
< fill_count
; i
++, addr
+= sizeof(u32
))
176 qed_wr(p_hwfn
, p_ptt
, addr
, fill
);
179 static int qed_init_cmd_array(struct qed_hwfn
*p_hwfn
,
180 struct qed_ptt
*p_ptt
,
181 struct init_write_op
*cmd
,
185 u32 data
= le32_to_cpu(cmd
->data
);
186 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
187 u32 dmae_array_offset
= le32_to_cpu(cmd
->args
.array_offset
);
188 u32 offset
, output_len
, input_len
, max_size
;
189 struct qed_dev
*cdev
= p_hwfn
->cdev
;
190 union init_array_hdr
*hdr
;
191 const u32
*array_data
;
195 array_data
= cdev
->fw_data
->arr_data
;
197 hdr
= (union init_array_hdr
*)(array_data
+
199 data
= le32_to_cpu(hdr
->raw
.data
);
200 switch (GET_FIELD(data
, INIT_ARRAY_RAW_HDR_TYPE
)) {
201 case INIT_ARR_ZIPPED
:
202 offset
= dmae_array_offset
+ 1;
203 input_len
= GET_FIELD(data
,
204 INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE
);
205 max_size
= MAX_ZIPPED_SIZE
* 4;
206 memset(p_hwfn
->unzip_buf
, 0, max_size
);
208 output_len
= qed_unzip_data(p_hwfn
, input_len
,
209 (u8
*)&array_data
[offset
],
210 max_size
, (u8
*)p_hwfn
->unzip_buf
);
212 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
, 0,
215 b_must_dmae
, b_can_dmae
);
217 DP_NOTICE(p_hwfn
, "Failed to unzip dmae data\n");
221 case INIT_ARR_PATTERN
:
223 u32 repeats
= GET_FIELD(data
,
224 INIT_ARRAY_PATTERN_HDR_REPETITIONS
);
227 size
= GET_FIELD(data
, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE
);
229 for (i
= 0; i
< repeats
; i
++, addr
+= size
<< 2) {
230 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
231 dmae_array_offset
+ 1,
233 b_must_dmae
, b_can_dmae
);
239 case INIT_ARR_STANDARD
:
240 size
= GET_FIELD(data
, INIT_ARRAY_STANDARD_HDR_SIZE
);
241 rc
= qed_init_array_dmae(p_hwfn
, p_ptt
, addr
,
242 dmae_array_offset
+ 1,
244 b_must_dmae
, b_can_dmae
);
251 /* init_ops write command */
252 static int qed_init_cmd_wr(struct qed_hwfn
*p_hwfn
,
253 struct qed_ptt
*p_ptt
,
254 struct init_write_op
*cmd
,
257 u32 data
= le32_to_cpu(cmd
->data
);
258 u32 addr
= GET_FIELD(data
, INIT_WRITE_OP_ADDRESS
) << 2;
259 bool b_must_dmae
= GET_FIELD(data
, INIT_WRITE_OP_WIDE_BUS
);
260 union init_write_args
*arg
= &cmd
->args
;
264 if (b_must_dmae
&& !b_can_dmae
) {
266 "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
271 switch (GET_FIELD(data
, INIT_WRITE_OP_SOURCE
)) {
272 case INIT_SRC_INLINE
:
273 qed_wr(p_hwfn
, p_ptt
, addr
,
274 le32_to_cpu(arg
->inline_val
));
278 (b_can_dmae
&& (le32_to_cpu(arg
->zeros_count
) >= 64)))
279 rc
= qed_init_fill_dmae(p_hwfn
, p_ptt
, addr
, 0,
280 le32_to_cpu(arg
->zeros_count
));
282 qed_init_fill(p_hwfn
, p_ptt
, addr
, 0,
283 le32_to_cpu(arg
->zeros_count
));
286 rc
= qed_init_cmd_array(p_hwfn
, p_ptt
, cmd
,
287 b_must_dmae
, b_can_dmae
);
289 case INIT_SRC_RUNTIME
:
290 qed_init_rt(p_hwfn
, p_ptt
, addr
,
291 le16_to_cpu(arg
->runtime
.offset
),
292 le16_to_cpu(arg
->runtime
.size
));
299 static inline bool comp_eq(u32 val
, u32 expected_val
)
301 return val
== expected_val
;
304 static inline bool comp_and(u32 val
, u32 expected_val
)
306 return (val
& expected_val
) == expected_val
;
309 static inline bool comp_or(u32 val
, u32 expected_val
)
311 return (val
| expected_val
) > 0;
314 /* init_ops read/poll commands */
315 static void qed_init_cmd_rd(struct qed_hwfn
*p_hwfn
,
316 struct qed_ptt
*p_ptt
,
317 struct init_read_op
*cmd
)
319 u32 data
= le32_to_cpu(cmd
->op_data
);
320 u32 addr
= GET_FIELD(data
, INIT_READ_OP_ADDRESS
) << 2;
322 bool (*comp_check
)(u32 val
,
324 u32 delay
= QED_INIT_POLL_PERIOD_US
, val
;
326 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
328 data
= le32_to_cpu(cmd
->op_data
);
329 if (GET_FIELD(data
, INIT_READ_OP_POLL
)) {
332 switch (GET_FIELD(data
, INIT_READ_OP_POLL_COMP
)) {
333 case INIT_COMPARISON_EQ
:
334 comp_check
= comp_eq
;
336 case INIT_COMPARISON_OR
:
337 comp_check
= comp_or
;
339 case INIT_COMPARISON_AND
:
340 comp_check
= comp_and
;
344 DP_ERR(p_hwfn
, "Invalid poll comparison type %08x\n",
350 i
< QED_INIT_MAX_POLL_COUNT
&&
351 !comp_check(val
, le32_to_cpu(cmd
->expected_val
));
354 val
= qed_rd(p_hwfn
, p_ptt
, addr
);
357 if (i
== QED_INIT_MAX_POLL_COUNT
)
359 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
360 addr
, le32_to_cpu(cmd
->expected_val
),
365 /* init_ops callbacks entry point */
/* init_ops callback op handler — currently no firmware init values need a
 * callback, so this only logs that one was unexpectedly encountered.
 */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}
373 static u8
qed_init_cmd_mode_match(struct qed_hwfn
*p_hwfn
,
377 struct qed_dev
*cdev
= p_hwfn
->cdev
;
378 const u8
*modes_tree_buf
;
379 u8 arg1
, arg2
, tree_val
;
381 modes_tree_buf
= cdev
->fw_data
->modes_tree_buf
;
382 tree_val
= modes_tree_buf
[(*offset
)++];
384 case INIT_MODE_OP_NOT
:
385 return qed_init_cmd_mode_match(p_hwfn
, offset
, modes
) ^ 1;
386 case INIT_MODE_OP_OR
:
387 arg1
= qed_init_cmd_mode_match(p_hwfn
, offset
, modes
);
388 arg2
= qed_init_cmd_mode_match(p_hwfn
, offset
, modes
);
390 case INIT_MODE_OP_AND
:
391 arg1
= qed_init_cmd_mode_match(p_hwfn
, offset
, modes
);
392 arg2
= qed_init_cmd_mode_match(p_hwfn
, offset
, modes
);
395 tree_val
-= MAX_INIT_MODE_OPS
;
396 return (modes
& (1 << tree_val
)) ? 1 : 0;
400 static u32
qed_init_cmd_mode(struct qed_hwfn
*p_hwfn
,
401 struct init_if_mode_op
*p_cmd
,
404 u16 offset
= le16_to_cpu(p_cmd
->modes_buf_offset
);
406 if (qed_init_cmd_mode_match(p_hwfn
, &offset
, modes
))
409 return GET_FIELD(le32_to_cpu(p_cmd
->op_data
),
410 INIT_IF_MODE_OP_CMD_OFFSET
);
413 static u32
qed_init_cmd_phase(struct qed_hwfn
*p_hwfn
,
414 struct init_if_phase_op
*p_cmd
,
418 u32 data
= le32_to_cpu(p_cmd
->phase_data
);
419 u32 op_data
= le32_to_cpu(p_cmd
->op_data
);
421 if (!(GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE
) == phase
&&
422 (GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == ANY_PHASE_ID
||
423 GET_FIELD(data
, INIT_IF_PHASE_OP_PHASE_ID
) == phase_id
)))
424 return GET_FIELD(op_data
, INIT_IF_PHASE_OP_CMD_OFFSET
);
429 int qed_init_run(struct qed_hwfn
*p_hwfn
,
430 struct qed_ptt
*p_ptt
,
435 struct qed_dev
*cdev
= p_hwfn
->cdev
;
436 u32 cmd_num
, num_init_ops
;
437 union init_op
*init_ops
;
441 num_init_ops
= cdev
->fw_data
->init_ops_size
;
442 init_ops
= cdev
->fw_data
->init_ops
;
444 p_hwfn
->unzip_buf
= kzalloc(MAX_ZIPPED_SIZE
* 4, GFP_ATOMIC
);
445 if (!p_hwfn
->unzip_buf
) {
446 DP_NOTICE(p_hwfn
, "Failed to allocate unzip buffer\n");
450 for (cmd_num
= 0; cmd_num
< num_init_ops
; cmd_num
++) {
451 union init_op
*cmd
= &init_ops
[cmd_num
];
452 u32 data
= le32_to_cpu(cmd
->raw
.op_data
);
454 switch (GET_FIELD(data
, INIT_CALLBACK_OP_OP
)) {
456 rc
= qed_init_cmd_wr(p_hwfn
, p_ptt
, &cmd
->write
,
460 qed_init_cmd_rd(p_hwfn
, p_ptt
, &cmd
->read
);
462 case INIT_OP_IF_MODE
:
463 cmd_num
+= qed_init_cmd_mode(p_hwfn
, &cmd
->if_mode
,
466 case INIT_OP_IF_PHASE
:
467 cmd_num
+= qed_init_cmd_phase(p_hwfn
, &cmd
->if_phase
,
469 b_dmae
= GET_FIELD(data
, INIT_IF_PHASE_OP_DMAE_ENABLE
);
472 /* qed_init_run is always invoked from
475 udelay(le32_to_cpu(cmd
->delay
.delay
));
478 case INIT_OP_CALLBACK
:
479 qed_init_cmd_cb(p_hwfn
, p_ptt
, &cmd
->callback
);
487 kfree(p_hwfn
->unzip_buf
);
491 void qed_gtt_init(struct qed_hwfn
*p_hwfn
)
496 /* Set the global windows */
497 gtt_base
= PXP_PF_WINDOW_ADMIN_START
+ PXP_PF_WINDOW_ADMIN_GLOBAL_START
;
499 for (i
= 0; i
< ARRAY_SIZE(pxp_global_win
); i
++)
500 if (pxp_global_win
[i
])
501 REG_WR(p_hwfn
, gtt_base
+ i
* PXP_GLOBAL_ENTRY_SIZE
,
505 int qed_init_fw_data(struct qed_dev
*cdev
,
508 struct qed_fw_data
*fw
= cdev
->fw_data
;
509 struct bin_buffer_hdr
*buf_hdr
;
513 DP_NOTICE(cdev
, "Invalid fw data\n");
517 buf_hdr
= (struct bin_buffer_hdr
*)data
;
519 offset
= buf_hdr
[BIN_BUF_INIT_CMD
].offset
;
520 fw
->init_ops
= (union init_op
*)(data
+ offset
);
522 offset
= buf_hdr
[BIN_BUF_INIT_VAL
].offset
;
523 fw
->arr_data
= (u32
*)(data
+ offset
);
525 offset
= buf_hdr
[BIN_BUF_INIT_MODE_TREE
].offset
;
526 fw
->modes_tree_buf
= (u8
*)(data
+ offset
);
527 len
= buf_hdr
[BIN_BUF_INIT_CMD
].length
;
528 fw
->init_ops_size
= len
/ sizeof(struct init_raw_op
);