// SPDX-License-Identifier: ISC

#include "mt7603.h"
#include "mac.h"
#include "../dma.h"

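/*
 * Allocate one hardware TX ring, register it with the mt76 core, hook it
 * up to the software queue and unmask the corresponding TX-done interrupt.
 */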
static int
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
		     int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&q->swq);
	q->q = hwq;

	mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

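/*
 * Handle a TX frame that the hardware looped back on the MCU RX ring with
 * its TX descriptor still attached, presumably because the destination
 * station entered powersave: re-point it at the management HW queue, mark
 * the station as having buffered frames and park the skb on the
 * per-station PS queue (capped at 64 entries) for later retransmission.
 */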
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
	__le32 *txd = (__le32 *)skb->data;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	struct mt76_wcid *wcid;
	void *priv;
	int idx;
	u32 val;
	u8 tid;

	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
		goto free;

	val = le32_to_cpu(txd[1]);
	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	skb->priority = FIELD_GET(MT_TXD1_TID, val);

	if (idx >= MT7603_WTBL_STA - 1)
		goto free;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		goto free;

	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
	val = le32_to_cpu(txd[0]);
	skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));

	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
	txd[0] = cpu_to_le32(val);

	sta = container_of(priv, struct ieee80211_sta, drv_priv);
	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	ieee80211_sta_set_buffered(sta, tid, true);

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_tail(&msta->psq, skb);
	if (skb_queue_len(&msta->psq) >= 64) {
		skb = __skb_dequeue(&msta->psq);
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&dev->ps_lock);
	return;

free:
	dev_kfree_skb(skb);
}

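/*
 * RX dispatch entry point: packets on the MCU ring are either firmware
 * events or looped-back PS frames; on the data ring the packet type field
 * of the first RX descriptor word selects between TX status, firmware
 * events and normal RX frames.
 */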
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));

	if (q == MT_RXQ_MCU) {
		if (type == PKT_TYPE_RX_EVENT)
			mt76_mcu_rx_event(&dev->mt76, skb);
		else
			mt7603_rx_loopback_skb(dev, skb);
		return;
	}

	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 5 <= end; rxd += 5)
			mt7603_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	case PKT_TYPE_NORMAL:
		if (mt7603_mac_fill_rx(dev, skb) == 0) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		/* fall through */
	default:
		dev_kfree_skb(skb);
		break;
	}
}

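/*
 * Allocate one RX ring and unmask its RX-done interrupt.
 */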
static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

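/*
 * TX completion NAPI poll: flush all TX queues, re-enable the TX-done
 * interrupt, then flush once more to catch completions that raced with
 * napi_complete_done(), and finally kick the TX tasklet.
 */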
static int mt7603_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7603_dev *dev;
	int i;

	dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
	dev->tx_dma_check = 0;

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt7603_mac_sta_poll(dev);

	tasklet_schedule(&dev->mt76.tx_tasklet);

	return 0;
}

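/*
 * Bring up all DMA rings: one TX queue per WMM AC (mapped to hardware
 * queues via wmm_queue_map), the PSD/MCU/beacon/CAB TX queues, and the
 * MCU and main RX queues, then attach and enable the TX completion NAPI
 * context.
 */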
int mt7603_dma_init(struct mt7603_dev *dev)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	int ret;
	int i;

	mt76_dma_attach(&dev->mt76);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
	mt7603_pse_client_reset(dev);

	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
					   wmm_queue_map[i],
					   MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
				   MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
				   MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
				   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt7603_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}

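/*
 * Disable TX/RX DMA, stop the TX tasklet and free all queue memory.
 */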
void mt7603_dma_cleanup(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	tasklet_kill(&dev->mt76.tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}