// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}
static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}
}
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}
static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}
static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}
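/* TX/RX rings live in coherent DMA memory holding the buffer
 * descriptors themselves; each RX entry additionally carries a
 * streaming-mapped skb whose DMA address is stashed in skb->cb so it
 * can be unmapped or re-armed later.
 */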
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}
static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}
static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}
static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}
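/* Program the DMA address and length of every TX/RX ring into the
 * corresponding TXBD/RXBD registers, and clear both the hardware and
 * host read/write indexes so each ring restarts from entry 0.
 */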
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 dma;
	u8 tmp;
	u16 len;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
			BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;
}
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
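/* Deep power-save handling: the device may only drop into deep PS when
 * no TX DMA is in flight, so the enter path scans every data TX ring
 * (BCN and H2C are exempt) before changing the power mode, while the
 * leave path simply restores normal power mode if deep PS was set.
 */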
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* Deep PS state is not allowed while TX-DMA is active */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue is rsvd page, does not have DMA interrupt
		 * H2C queue is managed by firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
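/* Common PCI TX path: prepend the chip's TX packet descriptor to the
 * skb, map the whole frame for DMA, fill a two-segment TX buffer
 * descriptor (descriptor header first, payload second), then kick the
 * ring by writing the new write pointer (or the beacon-work bit for
 * the BCN queue).
 */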
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;
	u32 bd_idx;
	unsigned long flags;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	rtw_pci_deep_ps_leave(rtwdev);
	skb_queue_tail(&ring->queue, skb);

	/* kick off tx queue */
	if (queue != RTW_TX_QUEUE_BCN) {
		if (++ring->r.wp >= ring->r.len)
			ring->r.wp = 0;
		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
	} else {
		u32 reg_bcn_work;

		reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
	}
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}
static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;
	pkt_info.offset = tx_pkt_desc_sz;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}
static int rtw_pci_tx(struct rtw_dev *rtwdev,
		      struct rtw_tx_pkt_info *pkt_info,
		      struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}
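/* TX completion: read the hardware read pointer back from the TXBD
 * index register; every skb between the software read pointer and the
 * hardware one has completed DMA, so it is unmapped and either freed
 * (H2C commands) or handed back to mac80211 with its TX status.
 */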
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= 0xfff;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}
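/* RX completion for the MPDU queue: walk the buffer descriptors up to
 * the hardware write pointer, copy each received frame (including its
 * RX descriptor) into a freshly allocated skb for mac80211 or the C2H
 * handler, then re-arm the original DMA buffer and advance the ring.
 */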
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= 0xfff;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock(&rtwpci->irq_lock);
	if (!rtwpci->irq_enabled)
		goto out;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * the thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
out:
	spin_unlock(&rtwpci->irq_lock);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;
	u32 irq_status[4];

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return IRQ_HANDLED;
}
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}
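/* DBI and MDIO are indirect register interfaces used to touch the PCIe
 * configuration/PHY parameters of the chip itself: a write sets up the
 * address and data registers, raises a flag bit, and then polls the
 * flag until the hardware has committed the access.
 */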
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}
static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}
static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should be enabled when the driver/firmware
	 * enters power save mode, i.e. when there is no heavy traffic. We
	 * have seen inter-operability issues where the link tends to enter
	 * L1 on the fly even while the driver is pushing high throughput,
	 * probably because ASPM behavior varies slightly across SoCs.
	 */
	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw_pci_aspm_set(rtwdev, enter);
}
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* Though the standard PCIE configuration space can set the link
	 * control register, by Realtek's design the driver should first
	 * check whether the host supports CLKREQ/ASPM before enabling the
	 * HW module.
	 *
	 * These functions are implemented by two associated HW modules:
	 * one is responsible for accessing PCIE configuration space to
	 * follow the host settings, and the other is in charge of the
	 * CLKREQ/ASPM mechanisms and is disabled by default. Sometimes the
	 * host does not support them, and wrong settings (e.g. CLKREQ# not
	 * bi-directional) could lead to losing the device if the HW
	 * misbehaves on the link.
	 *
	 * Hence the driver first checks that the PCIE configuration space
	 * is synced and enabled, and only then turns on the other module
	 * that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}
static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_intf_phy_para *para;
	u32 cut;
	u32 offset;
	u32 value;
	s32 i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);
}
#ifdef CONFIG_PM
static int rtw_pci_suspend(struct device *dev)
{
	return 0;
}

static int rtw_pci_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
#define RTW_PM_OPS (&rtw_pm_ops)
#else
#define RTW_PM_OPS NULL
#endif
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}
static struct rtw_hci_ops rtw_pci_ops = {
	.tx = rtw_pci_tx,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}
static int rtw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
static void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
static struct pci_driver rtw_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
	.driver.pm = RTW_PM_OPS,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");