// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>

#include "pcie_priv.h"
#include "topaz_pcie_regs.h"
#include "topaz_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define TOPAZ_TX_BD_SIZE_DEFAULT	128
#define TOPAZ_RX_BD_SIZE_DEFAULT	256

struct qtnf_topaz_tx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_topaz_rx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_extra_bd_params {
	__le32 param1;
	__le32 param2;
	__le32 param3;
	__le32 param4;
} __packed;

#define QTNF_BD_PARAM_OFFSET(n)	offsetof(struct qtnf_extra_bd_params, param##n)

struct vmac_pkt_info {
	__le32 addr;
	__le32 info;
};
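
/*
 * Board Data Area: shared control block mapped from the card's endpoint
 * memory BAR. Host and EP firmware use it to exchange boot state, DMA
 * descriptor ring addresses and the per-packet TX request queue.
 */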
struct qtnf_topaz_bda {
	__le16	bda_len;
	__le16	bda_version;
	__le32	bda_bootstate;
	__le32	bda_dma_mask;
	__le32	bda_dma_offset;
	__le32	bda_flags;
	__le32	bda_img;
	__le32	bda_img_size;
	__le32	bda_ep2h_irqstatus;
	__le32	bda_h2ep_irqstatus;
	__le32	bda_msi_addr;
	u8	reserved1[56];
	__le32	bda_flashsz;
	u8	bda_boardname[PCIE_BDA_NAMELEN];
	__le32	bda_pci_pre_status;
	__le32	bda_pci_endian;
	__le32	bda_pci_post_status;
	__le32	bda_h2ep_txd_budget;
	__le32	bda_ep2h_txd_budget;
	__le32	bda_rc_rx_bd_base;
	__le32	bda_rc_rx_bd_num;
	__le32	bda_rc_tx_bd_base;
	__le32	bda_rc_tx_bd_num;
	u8	bda_ep_link_state;
	u8	bda_rc_link_state;
	u8	bda_rc_msi_enabled;
	u8	reserved2;
	__le32	bda_ep_next_pkt;
	struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
} __packed;

struct qtnf_pcie_topaz_state {
	struct qtnf_pcie_bus_priv base;
	struct qtnf_topaz_bda __iomem *bda;

	dma_addr_t dma_msi_dummy;
	u32 dma_msi_imwr;

	struct qtnf_topaz_tx_bd *tx_bd_vbase;
	struct qtnf_topaz_rx_bd *rx_bd_vbase;

	__le32 __iomem *ep_next_rx_pkt;
	__le32 __iomem *txqueue_wake;
	__le32 __iomem *ep_pmstate;

	unsigned long rx_pkt_count;
};
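
/*
 * Legacy INTx handling: the card raises an interrupt by setting the
 * assert bit in its PCIE_CFG0 register; the host checks that bit to
 * identify the source and clears it to deassert the line.
 */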
static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~TOPAZ_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg = readl(reg);

	return !!(cfg & TOPAZ_ASSERT_INTX);
}

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ts->base.pdev);
}

static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}

static void qtnf_topaz_ipc_gen_ep_int(void *arg)
{
	struct qtnf_pcie_topaz_state *ts = arg;

	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
	       TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return (s == state);
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	qtnf_non_posted_write(state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while ((qtnf_is_state(reg, state) == 0)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}
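
/*
 * Allocate a single DMA-coherent region that holds the TX descriptor
 * ring, the RX descriptor ring and the extra shared parameters block,
 * and publish the ring base addresses to the card through the BDA.
 */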
static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
				struct qtnf_topaz_bda __iomem *bda)
{
	struct qtnf_extra_bd_params __iomem *extra_params;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;
	int i;

	/* bd table */

	len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
	      priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
	      sizeof(struct qtnf_extra_bd_params);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	ts->tx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);

	for (i = 0; i < priv->tx_bd_num; i++)
		ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);

	ts->rx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	/* extra shared params */

	vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
	paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);

	extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;

	ts->ep_next_rx_pkt = &extra_params->param1;
	qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
			      &bda->bda_ep_next_pkt);
	ts->txqueue_wake = &extra_params->param2;
	ts->ep_pmstate = &extra_params->param3;
	ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);

	return 0;
}

static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
	struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
	if (!skb) {
		ts->base.rx_skb[index] = NULL;
		return -ENOMEM;
	}

	ts->base.rx_skb[index] = skb;

	paddr = pci_map_single(ts->base.pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(ts->base.pdev, paddr)) {
		pr_err("skb mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

	ts->base.rx_bd_w_index = index;

	return 0;
}

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
	u16 i;
	int ret = 0;

	memset(ts->rx_bd_vbase, 0x0,
	       ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

	for (i = 0; i < ts->base.rx_bd_num; i++) {
		ret = topaz_skb2rbd_attach(ts, i, 0);
		if (ret)
			break;
	}

	ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
						cpu_to_le32(QTN_BD_WRAP);

	return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_rx_bd *rxbd;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ts->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
			rxbd->addr = 0;
			rxbd->info = 0;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ts->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
			txbd->addr = 0;
			txbd->info = 0;
		}
	}
}

static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
				     unsigned int tx_bd_size,
				     unsigned int rx_bd_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	int ret;

	if (tx_bd_size == 0)
		tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;

	/* check TX BD queue max length according to struct qtnf_topaz_bda */
	if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
		pr_warn("TX BD queue cannot exceed %d\n",
			QTN_PCIE_RC_TX_QUEUE_LEN);
		tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
	}

	priv->tx_bd_num = tx_bd_size;
	qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);

	if (rx_bd_size == 0)
		rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;

	if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
		pr_warn("RX BD queue cannot exceed %d\n",
			TOPAZ_RX_BD_SIZE_DEFAULT);
		rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
	}

	priv->rx_bd_num = rx_bd_size;
	qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);

	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = topaz_alloc_bd_table(ts, bda);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = topaz_alloc_rx_buffers(ts);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}
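
/*
 * Reclaim TX buffers already consumed by the card: the EP publishes its
 * fetch position in ep_next_rx_pkt, so everything between the host read
 * index and that mark can be unmapped and freed under tx_reclaim_lock.
 */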
static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(ts->ep_next_rx_pkt);
	i = priv->tx_bd_r_index;

	if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
		       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];

		if (likely(skb)) {
			txbd = &ts->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);

	if (ndev) {
		netif_tx_stop_all_queues(ndev);
		ts->base.tx_stopped = 1;
	}

	writel(0x0, ts->txqueue_wake);

	/* sync up tx queue status before generating interrupt */
	dma_wmb();

	/* send irq to card: tx stopped */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	/* schedule reclaim attempt */
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	int ready;

	ready = readl(ts->txqueue_wake);
	if (ready) {
		netif_wake_queue(ndev);
	} else {
		/* re-send irq to card: tx stopped */
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
		       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	}
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_topaz_data_tx_reclaim(ts);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}
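
/*
 * Data TX path: map the skb, place it into the next free descriptor,
 * mirror the DMA address and length into the BDA request queue, then
 * kick the card with a TX IPC doorbell.
 */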
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
			     unsigned int macid, unsigned int vifid)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_topaz_tx_bd *txbd;
	dma_addr_t skb_paddr;
	unsigned long flags;
	int ret = 0;
	int len;
	int i;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ts)) {
		qtnf_try_stop_xmit(bus, skb->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ts->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));

	writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
	writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);

	/* sync up descriptor updates before generating interrupt */
	dma_wmb();

	/* generate irq to card: tx done */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
	       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret) {
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_topaz_data_tx_reclaim(ts);

	return NETDEV_TX_OK;
}

static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
		return IRQ_NONE;

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ts);

	priv->pcie_irq_count++;

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (napi_schedule_prep(&bus->mux_napi)) {
		disable_rx_irqs(ts);
		__napi_schedule(&bus->mux_napi);
	}

	tasklet_hi_schedule(&priv->reclaim_tq);

	return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
	u16 index = ts->base.rx_bd_r_index;
	struct qtnf_topaz_rx_bd *rxbd;
	u32 descw;

	rxbd = &ts->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_BD_EMPTY)
		return 0;

	return 1;
}
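
/*
 * NAPI poll: consume up to @budget filled RX descriptors, hand the
 * packets to qtnf_classify_skb(), refill the ring with fresh skbs, and
 * notify the card once per RX_DONE_INTR_MSK received packets.
 */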
static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_topaz_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 poffset;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ts))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ts->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		poffset = QTN_GET_OFFSET(descw);
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (descw & QTN_BD_EMPTY) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			skb_reserve(skb, poffset);

			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				dev_sw_netstats_rx_add(ndev, skb->len);
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		/* notify card about recv packets once per several packets */
		if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
			writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
			       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace processed buffer by a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = topaz_skb2rbd_attach(ts, w_idx,
						   descw & QTN_BD_WRAP);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		enable_rx_irqs(ts);
	}

	return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_try_wake_xmit(bus, ndev);
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	napi_enable(&bus->mux_napi);
	enable_rx_irqs(ts);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	disable_rx_irqs(ts);
	napi_disable(&bus->mux_napi);
}

static struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
	/* control path methods */
	.control_tx	= qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);

	return 0;
}

static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	u32 tx_done_index = readl(ts->ep_next_rx_pkt);

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);

	seq_printf(s, "tx host queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx reclaim queue len(%u)\n",
		   CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx card queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}

static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 offset = readl(&bda->bda_dma_offset);

	if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
		return;

	writel(0x0, &bda->bda_dma_offset);
}
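
/*
 * Endianness handshake: write a known detection pattern and a "valid"
 * pre-status, wait for the card to echo the post-status, then verify
 * how the card reported the pattern it read.
 */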
static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 timeout = 0;
	u32 endian;
	int ret = 0;

	writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);

	/* flush endian modifications before status update */
	dma_wmb();

	writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);

	while (readl(&bda->bda_pci_post_status) !=
	       QTN_PCI_ENDIAN_VALID_STATUS) {
		usleep_range(1000, 1200);
		if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
			pr_err("card endianness detection timed out\n");
			ret = -ETIMEDOUT;
			goto endian_out;
		}
	}

	/* do not read before status is updated */
	dma_rmb();

	endian = readl(&bda->bda_pci_endian);
	WARN(endian != QTN_PCI_LITTLE_ENDIAN,
	     "%s: unexpected card endianness", __func__);

endian_out:
	writel(0, &bda->bda_pci_pre_status);
	writel(0, &bda->bda_pci_post_status);
	writel(0, &bda->bda_pci_endian);

	return ret;
}

static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 flags;
	int ret;

	ret = qtnf_pcie_endian_detect(ts);
	if (ret < 0) {
		pr_err("failed to detect card endianness\n");
		return ret;
	}

	writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
	qtnf_reset_dma_offset(ts);

	/* notify card about driver type and boot mode */
	flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;

	if (ts->base.flashboot)
		flags |= QTN_BDA_FLASH_BOOT;
	else
		flags &= ~QTN_BDA_FLASH_BOOT;

	writel(flags, &bda->bda_flags);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to boot...\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
{
	struct pci_dev *pdev = ts->base.pdev;

	setup_rx_irqs(ts);
	disable_rx_irqs(ts);

	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
			    QTN_FW_QLINK_TIMEOUT_MS))
		return -ETIMEDOUT;

	enable_irq(pdev->irq);

	return 0;
}
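
/*
 * Chunked firmware download: stream the image to the card through a
 * single DMA bounce buffer, with a BDA bootstate handshake confirming
 * every block. A zero-sized block marks the end of the image.
 */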
static int
qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct pci_dev *pdev = ts->base.pdev;
	u32 remaining = fw_size;
	u8 *curr = (u8 *)fw;
	u32 blksize;
	u32 nblocks;
	u32 offset;
	u32 count;
	u32 size;
	dma_addr_t paddr;
	void *data;
	int ret = 0;

	pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size);

	blksize = ts->base.fw_blksize;

	if (blksize < PAGE_SIZE)
		blksize = PAGE_SIZE;

	while (blksize >= PAGE_SIZE) {
		pr_debug("allocating %u bytes to upload FW\n", blksize);
		data = dma_alloc_coherent(&pdev->dev, blksize,
					  &paddr, GFP_KERNEL);
		if (data)
			break;
		blksize /= 2;
	}

	if (!data) {
		pr_err("failed to allocate DMA buffer for FW upload\n");
		ret = -ENOMEM;
		goto fw_load_out;
	}

	nblocks = NBLOCKS(fw_size, blksize);
	offset = readl(&bda->bda_dma_offset);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to download FW\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	for (count = 0 ; count < nblocks; count++) {
		size = (remaining > blksize) ? blksize : remaining;

		memcpy(data, curr, size);
		qtnf_non_posted_write(paddr + offset, &bda->bda_img);
		qtnf_non_posted_write(size, &bda->bda_img_size);

		pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
			 count, (void *)curr, &paddr, size);

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
		if (qtnf_poll_state(&ts->bda->bda_bootstate,
				    QTN_BDA_FW_BLOCK_DONE,
				    QTN_FW_DL_TIMEOUT_MS)) {
			pr_err("confirmation for block #%d timed out\n", count);
			ret = -ETIMEDOUT;
			goto fw_load_map;
		}

		remaining = (remaining < size) ? remaining : (remaining - size);
		curr += size;
	}

	/* upload completion mark: zero-sized block */
	qtnf_non_posted_write(0, &bda->bda_img);
	qtnf_non_posted_write(0, &bda->bda_img_size);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for the last block timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for FW upload completion timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", count);

fw_load_map:
	dma_free_coherent(&pdev->dev, blksize, data, paddr);

fw_load_out:
	return ret;
}

static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
				const char *fwname)
{
	const struct firmware *fw;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	if (qtnf_poll_state(&ts->bda->bda_bootstate,
			    QTN_BDA_FW_LOAD_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("%s: card is not ready\n", fwname);
		return -1;
	}

	pr_info("starting firmware upload: %s\n", fwname);

	ret = request_firmware(&fw, fwname, &pdev->dev);
	if (ret < 0) {
		pr_err("%s: request_firmware error %d\n", fwname, ret);
		return -1;
	}

	ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
	release_firmware(fw);

	if (ret)
		pr_err("%s: FW upload error\n", fwname);

	return ret;
}

static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

	if (bootloader_needed) {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
		if (ret)
			goto fw_load_exit;

		ret = qtnf_pre_init_ep(bus);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate,
			       QTN_BDA_FW_TARGET_BOOT);
	}

	if (ts->base.flashboot) {
		pr_info("booting firmware from flash\n");

		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_FLASH_BOOT,
				      QTN_FW_DL_TIMEOUT_MS);
		if (ret)
			goto fw_load_exit;
	} else {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_CONFIG,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("FW bringup timed out\n");
			goto fw_load_exit;
		}

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_RUNNING,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("card bringup timed out\n");
			goto fw_load_exit;
		}
	}

	ret = qtnf_post_init_ep(ts);
	if (ret) {
		pr_err("FW runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		goto fw_load_exit;

	qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	put_device(&pdev->dev);
}

static void qtnf_reclaim_tasklet_fn(struct tasklet_struct *t)
{
	struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);

	qtnf_topaz_data_tx_reclaim(ts);
}

static u64 qtnf_topaz_dma_mask_get(void)
{
	return DMA_BIT_MASK(32);
}
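
/*
 * Probe: wire up bus ops, request the MSI or shared legacy IRQ (kept
 * disabled until the firmware is up), bring the card through pre-init,
 * set up the descriptor rings, and register NAPI and shared-memory IPC.
 */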
static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
				 unsigned int tx_bd_num, unsigned int rx_bd_num)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;
	struct qtnf_shm_ipc_int ipc_int;
	unsigned long irqflags;
	int ret;

	bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
	INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
	ts->bda = ts->base.epmem_bar;

	/* assign host msi irq before card init */
	if (ts->base.msi_enabled)
		irqflags = IRQF_NOBALANCING;
	else
		irqflags = IRQF_NOBALANCING | IRQF_SHARED;

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_topaz_interrupt,
			       irqflags, "qtnf_topaz_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		return ret;
	}

	disable_irq(pdev->irq);

	ret = qtnf_pre_init_ep(bus);
	if (ret) {
		pr_err("failed to init card\n");
		return ret;
	}

	ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		return ret;
	}

	tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_topaz_rx_poll, 10);

	ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
	ipc_int.arg = ts;
	qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
			       &ts->bda->bda_shm_reg2, &ipc_int);

	return 0;
}

static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_topaz_reset_ep(ts);
	qtnf_topaz_free_xfer_buffers(ts);
}
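
/*
 * PM_SLEEP: the EP is told about D-state transitions through the shared
 * ep_pmstate word plus a PM IPC doorbell, in addition to the usual PCI
 * state save/restore and wake configuration.
 */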
#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	pci_save_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 1);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	writel((u32 __force)PCI_D0, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_topaz_state *ts;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
	if (!bus)
		return NULL;

	ts = get_bus_priv(bus);
	ts->base.probe_cb = qtnf_pcie_topaz_probe;
	ts->base.remove_cb = qtnf_pcie_topaz_remove;
	ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
	ts->base.resume_cb = qtnf_pcie_topaz_resume;
	ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
#endif

	return bus;
}