drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c

// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <net/netdev_queues.h>
#include "hbg_common.h"
#include "hbg_irq.h"
#include "hbg_reg.h"
#include "hbg_txrx.h"

#define netdev_get_tx_ring(netdev) \
			(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)

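/* Ring bookkeeping for the macros below: ntc ("next to clean") is the
 * consumer index and ntu ("next to use") is the producer index.  One
 * slot is always left unused so a full ring can be told from an empty one.
 */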
#define hbg_queue_used_num(head, tail, ring) ({ \
	typeof(ring) _ring = (ring); \
	((tail) + _ring->len - (head)) % _ring->len; })
#define hbg_queue_left_num(head, tail, ring) ({ \
	typeof(ring) _r = (ring); \
	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
#define hbg_queue_is_empty(head, tail, ring) \
	(hbg_queue_used_num((head), (tail), (ring)) == 0)
#define hbg_queue_is_full(head, tail, ring) \
	(hbg_queue_left_num((head), (tail), (ring)) == 0)
#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
#define hbg_queue_move_next(p, ring) ({ \
	typeof(ring) _ring = (ring); \
	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })

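/* Tx flow-control thresholds: the queue is stopped when fewer than
 * HBG_TX_STOP_THRS descriptors remain free; HBG_TX_START_THRS is the
 * matching restart threshold passed to netif_subqueue_maybe_stop().
 */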
#define HBG_TX_STOP_THRS	2
#define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)

static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
		return -ENOMEM;

	return 0;
}

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}

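/* Build one Tx descriptor.  The WB bit requests a completion writeback
 * and word3 carries the DMA address of this buffer's state field, which
 * hbg_napi_tx_recycle() later checks for HBG_TX_STATE_COMPLETE.
 */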
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;
	tx_desc->word3 = buffer->state_dma;
}

netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hbg_ring *ring = netdev_get_tx_ring(netdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_napi_tx_recycle(), which runs in the Tx interrupt handling path.
	 */
	u32 ntc = smp_load_acquire(&ring->ntc);
	struct hbg_buffer *buffer;
	struct hbg_tx_desc tx_desc;
	u32 ntu = ring->ntu;

	if (unlikely(!skb->len ||
		     skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (!netif_subqueue_maybe_stop(netdev, 0,
				       hbg_queue_left_num(ntc, ntu, ring),
				       HBG_TX_STOP_THRS, HBG_TX_START_THRS))
		return NETDEV_TX_BUSY;

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_napi_tx_recycle(), which runs in the Tx interrupt handling path.
	 */
	smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
	dev_sw_netstats_tx_add(netdev, 1, skb->len);
	return NETDEV_TX_OK;
}

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
{
	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
	struct hbg_priv *priv = buffer->priv;

	buffer->skb = netdev_alloc_skb(priv->netdev, len);
	if (unlikely(!buffer->skb))
		return -ENOMEM;

	buffer->skb_len = len;
	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
	return 0;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	hbg_dma_unmap(buffer);
	hbg_buffer_free_skb(buffer);
}

static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with the smp_store_release() in
	 * hbg_net_start_xmit(), which runs in the xmit path.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need to do cleanup even if budget is 0.
	 * Per the NAPI documentation, budget applies to Rx only,
	 * so we hardcode the amount of work Tx NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure the HW descriptor write has completed */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with the smp_load_acquire() in
	 * hbg_net_start_xmit(), which runs in the xmit path.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}

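/* Allocate and DMA-map one Rx skb and hand its address to the hardware;
 * returns 0 without doing anything if the Rx ring is already full.
 */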
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
	struct hbg_ring *ring = &priv->rx_ring;
	struct hbg_buffer *buffer;
	int ret;

	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
		return 0;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_skb(buffer);
	if (unlikely(ret))
		return ret;

	ret = hbg_dma_map(buffer);
	if (unlikely(ret)) {
		hbg_buffer_free_skb(buffer);
		return ret;
	}

	hbg_hw_fill_buffer(priv, buffer->skb_dma);
	hbg_queue_move_next(ntu, ring);
	return 0;
}

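/* Sync the Rx buffer for CPU access and check whether the hardware has
 * finished filling it: a non-zero packet length in the descriptor at the
 * head of the skb data indicates a completed frame.
 */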
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* make sure the HW descriptor write has completed */
	dma_rmb();

	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
				buffer->skb_len, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}

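/* Rx NAPI poll.  Each completed buffer starts with a hardware-written
 * struct hbg_rx_desc followed by the frame data; skb_reserve() skips
 * that header (plus NET_IP_ALIGN) before the skb is passed up the stack.
 */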
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	struct hbg_priv *priv = ring->priv;
	struct hbg_rx_desc *rx_desc;
	struct hbg_buffer *buffer;
	u32 packet_done = 0;
	u32 pkt_len;

	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
			break;

		buffer = &ring->queue[ring->ntc];
		if (unlikely(!buffer->skb))
			goto next_buffer;

		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
			break;
		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);

		hbg_dma_unmap(buffer);

		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
		skb_put(buffer->skb, pkt_len);
		buffer->skb->protocol = eth_type_trans(buffer->skb,
						       priv->netdev);

		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
		napi_gro_receive(napi, buffer->skb);
		buffer->skb = NULL;

next_buffer:
		hbg_rx_fill_one_buffer(priv);
		hbg_queue_move_next(ntc, ring);
		packet_done++;
	}

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

	return packet_done;
}

static void hbg_ring_uninit(struct hbg_ring *ring)
{
	struct hbg_buffer *buffer;
	u32 i;

	if (!ring->queue)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	for (i = 0; i < ring->len; i++) {
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}

	dma_free_coherent(&ring->priv->pdev->dev,
			  ring->len * sizeof(*ring->queue),
			  ring->queue, ring->queue_dma);
	ring->queue = NULL;
	ring->queue_dma = 0;
	ring->len = 0;
	ring->priv = NULL;
}

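/* Allocate and initialise one ring.  len is the hardware FIFO depth plus
 * one, matching the queue macros above which always keep one slot empty.
 */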
static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
			 int (*napi_poll)(struct napi_struct *, int),
			 enum hbg_dir dir)
{
	struct hbg_buffer *buffer;
	u32 i, len;

	len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
	ring->queue = dma_alloc_coherent(&priv->pdev->dev,
					 len * sizeof(*ring->queue),
					 &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}

	ring->dir = dir;
	ring->priv = priv;
	ring->ntc = 0;
	ring->ntu = 0;
	ring->len = len;

	if (dir == HBG_DIR_TX)
		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
	else
		netif_napi_add(priv->netdev, &ring->napi, napi_poll);

	napi_enable(&ring->napi);
	return 0;
}

static int hbg_tx_ring_init(struct hbg_priv *priv)
{
	struct hbg_ring *tx_ring = &priv->tx_ring;

	if (!tx_ring->tout_log_buf)
		tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
						     HBG_TX_TIMEOUT_BUF_LEN,
						     GFP_KERNEL);

	if (!tx_ring->tout_log_buf)
		return -ENOMEM;

	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
}

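/* Initialise the Rx ring and pre-fill every usable slot (ring->len - 1,
 * since one slot always stays empty).
 */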
static int hbg_rx_ring_init(struct hbg_priv *priv)
{
	int ret;
	u32 i;

	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
	if (ret)
		return ret;

	for (i = 0; i < priv->rx_ring.len - 1; i++) {
		ret = hbg_rx_fill_one_buffer(priv);
		if (ret) {
			hbg_ring_uninit(&priv->rx_ring);
			return ret;
		}
	}

	return 0;
}

int hbg_txrx_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_tx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init tx ring, ret = %d\n", ret);
		return ret;
	}

	ret = hbg_rx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init rx ring, ret = %d\n", ret);
		hbg_ring_uninit(&priv->tx_ring);
	}

	return ret;
}

void hbg_txrx_uninit(struct hbg_priv *priv)
{
	hbg_ring_uninit(&priv->tx_ring);
	hbg_ring_uninit(&priv->rx_ring);
}