// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>

#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};

struct qed_ptt_pool {
	struct list_head	free_list;
	spinlock_t		lock; /* ptt synchronized access */
	struct qed_ptt		ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

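/* Each PF has a limited set of external BAR windows (PTTs). A PTT maps a
 * fixed-size window in the BAR onto an arbitrary internal (GRC) address,
 * so a bounded BAR can reach the whole register space. The pool below
 * hands these windows out; entries below RESERVED_PTT_MAX are reserved by
 * design and never enter the free list.
 */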
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

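/* Typical usage (sketch; the error code is the caller's choice): callers
 * bracket register access with acquire/release, e.g.
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	qed_wr(p_hwfn, p_ptt, addr, val);
 *	qed_ptt_release(p_hwfn, p_ptt);
 *
 * Acquire may sleep (usleep_range), so it must not be called from atomic
 * context.
 */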
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

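/* Re-pointing a window is a single write of the dword-aligned target
 * address into this PTT's entry in the admin window; subsequent BAR
 * accesses through qed_ptt_get_bar_addr() then hit the new GRC region.
 */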
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

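/* qed_wr()/qed_rd() are the generic register accessors: qed_set_ptt()
 * slides the window (if needed) so that hw_addr falls inside it, and the
 * access itself is an ordinary BAR read/write at window base + offset.
 */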
void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
		   hw_addr, dest, hw_addr, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
		   hw_addr, hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

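/* "Pretend" makes subsequent accesses through this PTT appear to the HW as
 * if they were issued by another function (fid) and/or another port. Each
 * pretend command fully replaces the previous one, which is why the
 * pretend fields are always rebuilt from scratch below.
 */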
void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 port_id, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}

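/* DMAE - the HW DMA engine. It copies between host memory and the GRC
 * (internal register/memory space) without the host touching every dword.
 * A command is built in a coherent buffer, copied into the engine's
 * command memory, and kicked off via a per-channel GO register; completion
 * is signalled by the engine writing a magic value to a completion word in
 * host memory.
 */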
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
	((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))

static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u8 src_pfid, dst_pfid, port_id;
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	SET_FIELD(opcode, DMAE_CMD_SRC,
		  (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
	src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
		   p_params->src_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	SET_FIELD(opcode, DMAE_CMD_DST,
		  (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
	dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
		   p_params->dst_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
		SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);

	/* swapping mode 3 - big endian */
	SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);

	port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
		  p_params->port_id : p_hwfn->port_id;
	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);

	/* reset source address in next go */
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	/* reset dest address in next go */
	SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
	}

	if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}

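/* E.g. channel 2 kicks DMAE_REG_GO_C0 + (2 << 2), i.e. the third 32-bit
 * 'go' register in the array. The channel used by a PF is its relative
 * PF id (see qed_dmae_info_alloc()).
 */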
static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}

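/* Per-hwfn DMAE resources, all DMA-coherent so the engine and the host see
 * a consistent view: a one-dword completion word, the command structure
 * itself, and an intermediate bounce buffer of DMAE_MAX_RW_SIZE dwords for
 * copies to/from host virtual addresses.
 */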
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;

err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}

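/* Completion is polled: the engine writes DMAE_COMPLETION_VAL to the
 * completion word when done. barrier() only forces the compiler to re-read
 * the (non-volatile) completion word on each iteration; coherency itself
 * comes from the dma_alloc_coherent() mapping.
 */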
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}

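/* A single sub-operation moves at most DMAE_MAX_RW_SIZE dwords. Host
 * virtual addresses can't be handed to the engine directly, so they are
 * staged through the intermediate buffer: copied in before posting (source
 * side) or copied out after completion (destination side).
 */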
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type, u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);
	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}

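/* Large requests are split into cnt_split full chunks of length_limit
 * dwords plus a length_mod remainder. E.g., assuming DMAE_MAX_RW_SIZE is
 * 0x2000, a 0x4100-dword copy runs as 0x2000 + 0x2000 + 0x100. GRC
 * addresses advance in dwords, host addresses in bytes (offset * 4).
 */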
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   src_addr, src_type, dst_addr, dst_type,
			   size_in_dwords);

		/* Let the flow complete w/o any error handling */
		return 0;
	}

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
					  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
					  qed_status, src_addr,
					  dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}

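/* The public entry points below serialize on dmae_info.mutex. A minimal
 * usage sketch ('vaddr' and 'len' are hypothetical caller values):
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)vaddr,
 *			       grc_addr, len, NULL);
 *
 * host2grc/grc2host treat the host address as virtual (staged through the
 * intermediate buffer), while host2host takes DMA (physical) addresses.
 */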
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&(p_hwfn->dmae_info.mutex));

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&(p_hwfn->dmae_info.mutex));

	return rc;
}

void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_hw_err_type err_type, const char *fmt, ...)
{
	char buf[QED_HW_ERR_MAX_STR_SIZE];
	va_list vl;
	int len;

	if (fmt) {
		va_start(vl, fmt);
		len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
		va_end(vl);

		if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
			len = QED_HW_ERR_MAX_STR_SIZE - 1;

		DP_NOTICE(p_hwfn, "%s", buf);
	}

	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->cdev->recov_in_prog &&
	    err_type != QED_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	qed_hw_error_occurred(p_hwfn, err_type);

	if (fmt)
		qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
}

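/* Self-test: fill the bottom half of a page-sized coherent buffer with a
 * known pattern (each dword's own address), DMA it onto the zeroed top
 * half with qed_dmae_host2host(), and verify the pattern arrived intact.
 */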
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, const char *phase)
{
	u32 size = PAGE_SIZE / 2, val;
	int rc = 0;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    2 * size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return -ENOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	memset((u8 *)p_virt + size, 0, size);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
		   phase, (u64)p_phys,
		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);

	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				size / 4, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn,
				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
				  p_tmp, *p_tmp, val);
			rc = -EINVAL;
			goto out;
		}
	}

out:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
	return rc;
}