// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/log2.h>

#include "pcie_priv.h"
#include "pearl_pcie_regs.h"
#include "pearl_pcie_ipc.h"
#include "qtn_hw_ids.h"
#define PEARL_TX_BD_SIZE_DEFAULT	32
#define PEARL_RX_BD_SIZE_DEFAULT	256
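
/*
 * Default TX/RX descriptor ring sizes. qtnf_pcie_pearl_init_xfer() falls
 * back to these whenever the requested size is zero, not a power of two,
 * or too large for the HHBM hardware; the rings are indexed with a
 * "count & (ring_size - 1)" mask, so a power-of-two size is mandatory.
 */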
struct qtnf_pearl_bda {
	/* ... */
	__le32 bda_pci_endian;
	__le32 bda_ep_state;
	__le32 bda_rc_state;
	/* ... */
	u8 bda_boardname[PCIE_BDA_NAMELEN];
	__le32 bda_rc_msi_enabled;
	u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
	__le32 bda_dsbw_start_index;
	__le32 bda_dsbw_end_index;
	__le32 bda_dsbw_total_bytes;
	__le32 bda_rc_tx_bd_base;
	__le32 bda_rc_tx_bd_num;
	u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
} __packed;
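
/*
 * The boot data area (BDA) above is mapped from endpoint memory (see
 * qtnf_pcie_pearl_probe() setting ps->bda = ps->base.epmem_bar). The host
 * advertises its state in bda_rc_state, polls the card's progress in
 * bda_ep_state, and exchanges control-path messages through the two
 * 4K-aligned shared-memory IPC regions.
 */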
struct qtnf_pearl_tx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	/* ... */
} __packed;
struct qtnf_pearl_rx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	/* ... */
} __packed;
struct qtnf_pearl_fw_hdr {
	u8 boardflg[8];
	__le32 fwsize;
	__le32 seqnum;
	__le32 type;
	__le32 pktlen;
	__le32 crc;
} __packed;
struct qtnf_pcie_pearl_state {
	struct qtnf_pcie_bus_priv base;

	/* lock for irq configuration changes */
	spinlock_t irq_lock;

	struct qtnf_pearl_bda __iomem *bda;
	void __iomem *pcie_reg_base;

	struct qtnf_pearl_tx_bd *tx_bd_vbase;
	dma_addr_t tx_bd_pbase;

	struct qtnf_pearl_rx_bd *rx_bd_vbase;
	dma_addr_t rx_bd_pbase;

	dma_addr_t bd_table_paddr;
	void *bd_table_vaddr;
	u32 bd_table_len;

	u32 pcie_irq_mask;
	u32 pcie_irq_rx_count;
	u32 pcie_irq_tx_count;
	u32 pcie_irq_uf_count;
};
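
/*
 * HDP interrupt helpers: a software copy of the enabled interrupt bits is
 * kept in pcie_irq_mask and every update is mirrored into the
 * PCIE_HDP_INT_EN register under irq_lock, so the RX-done and TX-done
 * sources can be masked and unmasked independently from the interrupt
 * handler, NAPI and the reclaim tasklet.
 */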
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}
static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
{
	void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~PEARL_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}
static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
{
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ps->base.pdev);
}
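
/*
 * qtnf_pearl_reset_ep() rings the LHOST doorbell with the EP_RESET word,
 * gives the card QTN_EP_RESET_WAIT_MS to go through reset and then
 * restores the previously saved PCI config space. It is called from the
 * remove path before the transfer buffers are freed.
 */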
static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
{
	const struct qtnf_pcie_pearl_state *ps = arg;
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
}
static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return s & state;
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(state | s, reg);
}

static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(s & ~state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while ((qtnf_is_state(reg, state) == 0)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}
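
/*
 * bda_rc_state/bda_ep_state form a simple bitmask handshake: the host
 * publishes bits with a read-modify-write (qtnf_set_state), waits for the
 * endpoint's bits with qtnf_poll_state() in ~1 ms steps up to delay_in_ms,
 * and acknowledges them with qtnf_clear_state(). The firmware download
 * code below is built entirely on this mechanism.
 */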
static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	ps->bd_table_vaddr = vaddr;
	ps->bd_table_paddr = paddr;
	ps->bd_table_len = len;

	ps->tx_bd_vbase = vaddr;
	ps->tx_bd_pbase = paddr;

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);

	ps->rx_bd_vbase = vaddr;
	ps->rx_bd_pbase = paddr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
	writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	return 0;
}
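
/*
 * Both descriptor rings live in one coherent DMA allocation: tx_bd_num TX
 * descriptors first, the RX ring immediately after. The RX ring base and
 * element size are programmed into the HDP "TX host queue" registers,
 * i.e. the queue through which the endpoint delivers frames to the host.
 */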
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
	if (!skb) {
		priv->rx_skb[index] = NULL;
		return -ENOMEM;
	}

	priv->rx_skb[index] = skb;
	rxbd = &ps->rx_bd_vbase[index];

	paddr = pci_map_single(priv->pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));

	priv->rx_bd_w_index = index;

	/* sync up all descriptor updates */
	wmb();

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));

	writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));

	return 0;
}
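
/*
 * pearl_skb2rbd_attach() hands a single empty skb to the hardware: the
 * buffer is DMA-mapped, its address is stored in the RX descriptor (only
 * so the cleanup code can unmap it later), and the address plus the new
 * write index are pushed through the HHBM buffer pointer and host queue
 * write pointer registers.
 */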
static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
{
	u16 i;
	int ret = 0;

	memset(ps->rx_bd_vbase, 0x0,
	       ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));

	for (i = 0; i < ps->base.rx_bd_num; i++) {
		ret = pearl_skb2rbd_attach(ps, i);
		if (ret)
			break;
	}

	return ret;
}
/* all rx/tx activity should have ceased before calling this function */
static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ps->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
					      le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ps->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
		}
	}
}
static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
{
	u32 val;

	val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	val |= HHBM_CONFIG_SOFT_RESET;
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	usleep_range(50, 100);
	val &= ~HHBM_CONFIG_SOFT_RESET;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	val |= HHBM_64BIT;
#endif
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));

	return 0;
}
static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
				     unsigned int tx_bd_size,
				     unsigned int rx_bd_size)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	int ret;
	u32 val;

	if (tx_bd_size == 0)
		tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;

	val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);

	if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
		pr_warn("invalid tx_bd_size value %u, use default %u\n",
			tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
		priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
	} else {
		priv->tx_bd_num = tx_bd_size;
	}

	if (rx_bd_size == 0)
		rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;

	val = rx_bd_size * sizeof(dma_addr_t);

	if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
		pr_warn("invalid rx_bd_size value %u, use default %u\n",
			rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
		priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
	} else {
		priv->rx_bd_num = rx_bd_size;
	}

	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	ret = pearl_hhbm_init(ps);
	if (ret) {
		pr_err("failed to init h/w queues\n");
		return ret;
	}

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = pearl_alloc_bd_table(ps);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = pearl_alloc_rx_buffers(ps);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}
static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1);

	i = priv->tx_bd_r_index;

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];
		if (likely(skb)) {
			txbd = &ps->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}
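
/*
 * TX reclaim walks the ring from the last reclaimed slot up to the
 * hardware RX0DMA counter (masked to the ring size), unmapping and
 * freeing every skb the endpoint has already consumed. It runs from the
 * reclaim tasklet, from the transmit path and from qtnf_tx_queue_ready(),
 * hence the dedicated tx_reclaim_lock.
 */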
static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_pearl_data_tx_reclaim(ps);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			pr_warn_ratelimited("reclaim full Tx queue\n");
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}
static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_pearl_tx_bd *txbd;
	unsigned long flags;
	int len;
	int i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ps)) {
		if (skb->dev) {
			netif_tx_stop_all_queues(skb->dev);
			priv->tx_stopped = 1;
		}

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ps->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret && skb) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_pearl_data_tx_reclaim(ps);

	return NETDEV_TX_OK;
}
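
/*
 * Transmit path: the skb is DMA-mapped, its bus address and length are
 * written into the next free TX descriptor, and the descriptor's own bus
 * address is posted to the HDP HOST_WR_DESC0 registers to notify the
 * endpoint. NETDEV_TX_BUSY is returned (with the net queues stopped) only
 * if reclaim could not free any ring space.
 */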
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
			     unsigned int macid, unsigned int vifid)
{
	return qtnf_pcie_skb_send(bus, skb);
}
static int qtnf_pcie_data_tx_meta(struct qtnf_bus *bus, struct sk_buff *skb,
				  unsigned int macid, unsigned int vifid)
{
	struct qtnf_frame_meta_info *meta;
	int tail_need = sizeof(*meta) - skb_tailroom(skb);
	int ret;

	if (tail_need > 0 && pskb_expand_head(skb, 0, tail_need, GFP_ATOMIC)) {
		skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	meta = skb_put(skb, sizeof(*meta));
	meta->magic_s = HBM_FRAME_META_MAGIC_PATTERN_S;
	meta->magic_e = HBM_FRAME_META_MAGIC_PATTERN_E;
	meta->macid = macid;
	meta->ifidx = vifid;

	ret = qtnf_pcie_skb_send(bus, skb);
	if (unlikely(ret == NETDEV_TX_BUSY))
		__skb_trim(skb, skb->len - sizeof(*meta));

	return ret;
}
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & ps->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS)
		ps->pcie_irq_rx_count++;

	if (status & PCIE_HDP_INT_TX_BITS)
		ps->pcie_irq_tx_count++;

	if (status & PCIE_HDP_INT_HHBM_UF)
		ps->pcie_irq_uf_count++;

	if (status & PCIE_HDP_INT_RX_BITS) {
		qtnf_dis_rxdone_irq(ps);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		qtnf_dis_txdone_irq(ps);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ps);

	return IRQ_HANDLED;
}
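
/*
 * The interrupt handler only acknowledges and demultiplexes events: SHM
 * IPC doorbells go to the control path handlers, RX-done masks the RX
 * interrupt and schedules NAPI, TX-done masks the TX interrupt and
 * schedules the reclaim tasklet. Each source is unmasked again once the
 * deferred work has caught up.
 */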
static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
{
	u16 index = ps->base.rx_bd_r_index;
	struct qtnf_pearl_rx_bd *rxbd;
	u32 descw;

	rxbd = &ps->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_TXDONE_MASK)
		return 1;

	return 0;
}
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				dev_sw_netstats_rx_add(ndev, skb->len);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace processed buffer by a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}
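
/*
 * NAPI poll: completed RX descriptors are consumed up to the budget, each
 * valid skb is classified to its net_device and fed to GRO, and every
 * freed slot is refilled right away via pearl_skb2rbd_attach(). The
 * RX-done interrupt is re-enabled only when the ring was drained before
 * the budget ran out.
 */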
static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	tasklet_hi_schedule(&ps->base.reclaim_tq);
}
static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	qtnf_enable_hdp_irqs(ps);
	napi_enable(&bus->mux_napi);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	napi_disable(&bus->mux_napi);
	qtnf_disable_hdp_irqs(ps);
}
static void qtnf_pearl_tx_use_meta_info_set(struct qtnf_bus *bus, bool use_meta)
{
	if (use_meta)
		bus->bus_ops->data_tx = qtnf_pcie_data_tx_meta;
	else
		bus->bus_ops->data_tx = qtnf_pcie_data_tx;
}
static struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
	/* control path methods */
	.control_tx = qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx = qtnf_pcie_data_tx,
	.data_tx_timeout = qtnf_pcie_data_tx_timeout,
	.data_tx_use_meta_set = qtnf_pearl_tx_use_meta_info_set,
	.data_rx_start = qtnf_pcie_data_rx_start,
	.data_rx_stop = qtnf_pcie_data_rx_stop,
};
static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
	u32 status;

	seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
	seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
	status = reg & PCIE_HDP_INT_TX_BITS;
	seq_printf(s, "pcie_irq_tx_status(%s)\n",
		   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
	status = reg & PCIE_HDP_INT_RX_BITS;
	seq_printf(s, "pcie_irq_rx_status(%s)\n",
		   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
	status = reg & PCIE_HDP_INT_HHBM_UF;
	seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
		   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");

	return 0;
}
static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1));
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
	seq_printf(s, "tx queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
			& (priv->rx_bd_num - 1));
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}
*pdev
, uint32_t size
,
908 int blk
, const u8
*pblk
, const u8
*fw
)
910 struct qtnf_bus
*bus
= pci_get_drvdata(pdev
);
912 struct qtnf_pearl_fw_hdr
*hdr
;
915 int hds
= sizeof(*hdr
);
916 struct sk_buff
*skb
= NULL
;
920 skb
= __dev_alloc_skb(QTN_PCIE_FW_BUFSZ
, GFP_KERNEL
);
924 skb
->len
= QTN_PCIE_FW_BUFSZ
;
927 hdr
= (struct qtnf_pearl_fw_hdr
*)skb
->data
;
928 memcpy(hdr
->boardflg
, QTN_PCIE_BOARDFLG
, strlen(QTN_PCIE_BOARDFLG
));
929 hdr
->fwsize
= cpu_to_le32(size
);
930 hdr
->seqnum
= cpu_to_le32(blk
);
933 hdr
->type
= cpu_to_le32(QTN_FW_DSUB
);
935 hdr
->type
= cpu_to_le32(QTN_FW_DBEGIN
);
937 pdata
= skb
->data
+ hds
;
939 len
= QTN_PCIE_FW_BUFSZ
- hds
;
940 if (pblk
>= (fw
+ size
- len
)) {
941 len
= fw
+ size
- pblk
;
942 hdr
->type
= cpu_to_le32(QTN_FW_DEND
);
945 hdr
->pktlen
= cpu_to_le32(len
);
946 memcpy(pdata
, pblk
, len
);
947 hdr
->crc
= cpu_to_le32(~crc32(0, pdata
, len
));
949 ret
= qtnf_pcie_skb_send(bus
, skb
);
951 return (ret
== NETDEV_TX_OK
) ? len
: 0;
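
/*
 * Firmware is pushed over the regular data-TX ring in QTN_PCIE_FW_BUFSZ
 * chunks. Each chunk starts with a qtnf_pearl_fw_hdr carrying the board
 * flag string, total image size, sequence number, chunk type
 * (DBEGIN/DSUB/DEND), payload length and an inverted CRC32 of the
 * payload.
 */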
static int
qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

	while (blk < blk_count) {
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&ps->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&ps->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&ps->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&ps->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				if (blk == (blk_count - 1)) {
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&ps->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			qtnf_pearl_data_tx_reclaim(ps);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);

	return 0;
}
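
/*
 * The upload loop sends the image window by window (QTN_PCIE_FW_DLMASK
 * sized) and then synchronizes through the BDA state words; if the
 * endpoint reports a retry condition the blk/pblk cursors are rewound and
 * the previous window is sent again, with an overall retry counter
 * bounding the whole procedure.
 */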
static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
	const char *fwname = QTN_PCI_PEARL_FW_NAME;
	struct pci_dev *pdev = ps->base.pdev;
	const struct firmware *fw;
	int ret;

	if (ps->base.flashboot) {
		state |= QTN_RC_FW_FLASHBOOT;
	} else {
		ret = request_firmware(&fw, fwname, &pdev->dev);
		if (ret < 0) {
			pr_err("failed to get firmware %s\n", fwname);
			goto fw_load_exit;
		}
	}

	qtnf_set_state(&ps->bda->bda_rc_state, state);

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready\n");

		if (!ps->base.flashboot)
			release_firmware(fw);

		goto fw_load_exit;
	}

	qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

	if (ps->base.flashboot) {
		pr_info("booting firmware from flash\n");
	} else {
		pr_info("starting firmware upload: %s\n", fwname);

		ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
		release_firmware(fw);
		if (ret) {
			pr_err("firmware upload error\n");
			goto fw_load_exit;
		}
	}

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("firmware bringup timed out\n");
		goto fw_load_exit;
	}

	if (qtnf_poll_state(&ps->bda->bda_ep_state,
			    QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
		pr_err("firmware runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		goto fw_load_exit;

	qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	put_device(&pdev->dev);
}
static void qtnf_pearl_reclaim_tasklet_fn(struct tasklet_struct *t)
{
	struct qtnf_pcie_pearl_state *ps = from_tasklet(ps, t, base.reclaim_tq);

	qtnf_pearl_data_tx_reclaim(ps);
	qtnf_en_txdone_irq(ps);
}
static u64 qtnf_pearl_dma_mask_get(void)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	return DMA_BIT_MASK(64);
#else
	return DMA_BIT_MASK(32);
#endif
}
static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
				 unsigned int rx_bd_size)
{
	struct qtnf_shm_ipc_int ipc_int;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct pci_dev *pdev = ps->base.pdev;
	int ret;

	bus->bus_ops = &qtnf_pcie_pearl_bus_ops;
	spin_lock_init(&ps->irq_lock);
	INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);

	ps->pcie_reg_base = ps->base.dmareg_bar;
	ps->bda = ps->base.epmem_bar;
	writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);

	ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size, rx_bd_size);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		return ret;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(ps);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(ps);

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_pearl_interrupt, 0,
			       "qtnf_pearl_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		qtnf_pearl_free_xfer_buffers(ps);
		return ret;
	}

	tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_pcie_pearl_rx_poll, 10);

	ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
	ipc_int.arg = ps;
	qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
			       &ps->bda->bda_shm_reg2, &ipc_int);

	return 0;
}
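
/*
 * Probe wires the PEARL specifics into the generic qtnfmac PCIe core: bus
 * ops, register and BDA mappings, descriptor rings, the interrupt handler
 * (HDP interrupts stay disabled until the data path is started), the
 * reclaim tasklet, NAPI and the shared-memory IPC used for control
 * messages.
 */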
static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);

	qtnf_pearl_reset_ep(ps);
	qtnf_pearl_free_xfer_buffers(ps);
}
#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus)
{
	return -EOPNOTSUPP;
}

static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus)
{
	return 0;
}
#endif
struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_pearl_state *ps;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL);
	if (!bus)
		return NULL;

	ps = get_bus_priv(bus);
	ps->base.probe_cb = qtnf_pcie_pearl_probe;
	ps->base.remove_cb = qtnf_pcie_pearl_remove;
	ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
	ps->base.resume_cb = qtnf_pcie_pearl_resume;
	ps->base.suspend_cb = qtnf_pcie_pearl_suspend;
#endif

	return bus;
}