// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>

#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT	1000
#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP		1000
#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT	100000
#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY		10

#define QED_BAR_INVALID_OFFSET		(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};

struct qed_ptt_pool {
	struct list_head	free_list;
	spinlock_t		lock; /* ptt synchronized access */
	struct qed_ptt		ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
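
/* Each qed_ptt pairs an index into the per-PF external BAR windows with a
 * shadow of the pxp_ptt_entry the hardware sees. Entries with an index below
 * RESERVED_PTT_MAX are dedicated (handed out via qed_get_reserved_ptt());
 * the remaining entries circulate through the pool's free_list.
 */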

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	return qed_ptt_acquire_context(p_hwfn, false);
}

struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
{
	struct qed_ptt *p_ptt;
	unsigned int i, count;

	if (is_atomic)
		count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
	else
		count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;

	/* Take the free PTT from the list */
	for (i = 0; i < count; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

		if (is_atomic)
			udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
		else
			usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
				     QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");

	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
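
/* Illustrative usage sketch of the PTT pool (placeholder register/bit names,
 * not symbols from this driver):
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt) {
 *		u32 val = qed_rd(p_hwfn, p_ptt, some_reg_offset);
 *
 *		qed_wr(p_hwfn, p_ptt, some_reg_offset, val | some_bit);
 *		qed_ptt_release(p_hwfn, p_ptt);
 *	}
 */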

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}
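
/* Example: a stored pxp.offset of 0x100 (dwords) translates to the byte
 * address 0x400 (0x100 << 2).
 */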

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}
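
/* Window check example (illustrative numbers, not taken from the register
 * file): with the window programmed to byte address 0x10000 and a window
 * size of 0x1000, an access to hw_addr 0x10800 hits the window (offset
 * 0x800); an access to 0x12000 misses it, so qed_ptt_set_win() re-programs
 * the window and the access lands at offset 0 of the new window.
 */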

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}
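
/* Note: the copy above moves whole dwords (quota / 4); callers are expected
 * to pass dword-sized lengths, as any remainder below four bytes would not
 * be transferred.
 */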

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
		   hw_addr, dest, hw_addr, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
		   hw_addr, hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 port_id, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}

#define QED_DMAE_FLAGS_IS_SET(params, flag) \
	((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
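
/* The macro tolerates a NULL p_params, treating it as "no flags set", so
 * callers that are happy with the defaults (rel_pf_id, port_id, PF-to-PF)
 * simply pass NULL for the DMAE parameters.
 */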

static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u8 src_pfid, dst_pfid, port_id;
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	SET_FIELD(opcode, DMAE_CMD_SRC,
		  (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
	src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
		   p_params->src_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	SET_FIELD(opcode, DMAE_CMD_DST,
		  (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
	dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
		   p_params->dst_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
		SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);

	/* swapping mode 3 - big endian */
	SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);

	port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
		  p_params->port_id : p_hwfn->port_id;
	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);

	/* reset source address in next go */
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	/* reset dest address in next go */
	SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
	}

	if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
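
/* Example: DMAE channel 3 posts its GO command at DMAE_REG_GO_C0 + 12,
 * since each 'go' register occupies one dword (idx << 2).
 */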

static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The first 9 DWs are the command registers, the 10 DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;
err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}

static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   src_addr, src_type, dst_addr, dst_type,
			   size_in_dwords);

		/* Let the flow complete w/o any error handling */
		return 0;
	}

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
					  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
					  qed_status, src_addr,
					  dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}
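
/* Chunking example for the loop above (illustrative sizes): if
 * DMAE_MAX_RW_SIZE were 0x2000 dwords, a request of 0x4100 dwords would be
 * issued as sub-operations of 0x2000, 0x2000 and 0x100 dwords; a request of
 * exactly 0x4000 dwords would make the final chunk zero-length and skip it.
 */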

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}
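
/* Illustrative call (placeholder names, not symbols from this driver): copy
 * a host buffer into the GRC using the default parameters:
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)host_buf,
 *			       grc_dest_addr, num_dwords, NULL);
 */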

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&(p_hwfn->dmae_info.mutex));

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&(p_hwfn->dmae_info.mutex));

	return rc;
}

void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_hw_err_type err_type, const char *fmt, ...)
{
	char buf[QED_HW_ERR_MAX_STR_SIZE];
	va_list vl;
	int len;

	if (fmt) {
		va_start(vl, fmt);
		len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
		va_end(vl);

		if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
			len = QED_HW_ERR_MAX_STR_SIZE - 1;

		DP_NOTICE(p_hwfn, "%s", buf);
	}

	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->cdev->recov_in_prog &&
	    err_type != QED_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	qed_hw_error_occurred(p_hwfn, err_type);

	if (fmt)
		qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
}

int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, const char *phase)
{
	u32 size = PAGE_SIZE / 2, val;
	int rc = 0;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    2 * size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return -ENOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	memset((u8 *)p_virt + size, 0, size);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
		   phase, (u64)p_phys,
		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);

	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				size / 4, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn,
				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
				  p_tmp, *p_tmp, val);
			rc = -EINVAL;
			goto out;
		}
	}

out:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);

	return rc;
}