// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <net/netdev_queues.h>
#include "hbg_common.h"

#define netdev_get_tx_ring(netdev) \
			(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))

#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
				   DMA_FROM_DEVICE : DMA_TO_DEVICE)

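/* Each ring is tracked by a producer/consumer index pair (ntu/ntc). One slot
 * is always kept unused (note the "- 1" in hbg_queue_left_num() and the
 * "+ 1" in hbg_ring_init()) so that a full ring can be distinguished from an
 * empty one without extra state.
 */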
#define hbg_queue_used_num(head, tail, ring) ({ \
	typeof(ring) _ring = (ring); \
	((tail) + _ring->len - (head)) % _ring->len; })
#define hbg_queue_left_num(head, tail, ring) ({ \
	typeof(ring) _r = (ring); \
	_r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
#define hbg_queue_is_empty(head, tail, ring) \
	(hbg_queue_used_num((head), (tail), (ring)) == 0)
#define hbg_queue_is_full(head, tail, ring) \
	(hbg_queue_left_num((head), (tail), (ring)) == 0)
#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
#define hbg_queue_move_next(p, ring) ({ \
	typeof(ring) _ring = (ring); \
	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })

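/* The TX queue is stopped once fewer than HBG_TX_STOP_THRS descriptors are
 * free and restarted once at least HBG_TX_START_THRS are free again, see
 * netif_subqueue_maybe_stop() in hbg_net_start_xmit().
 */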
#define HBG_TX_STOP_THRS	2
#define HBG_TX_START_THRS	(2 * HBG_TX_STOP_THRS)

static int hbg_dma_map(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	buffer->skb_dma = dma_map_single(&priv->pdev->dev,
					 buffer->skb->data, buffer->skb_len,
					 buffer_to_dma_dir(buffer));
	if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
		return -ENOMEM;

	return 0;
}

static void hbg_dma_unmap(struct hbg_buffer *buffer)
{
	struct hbg_priv *priv = buffer->priv;

	if (unlikely(!buffer->skb_dma))
		return;

	dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
			 buffer_to_dma_dir(buffer));
	buffer->skb_dma = 0;
}

static void hbg_init_tx_desc(struct hbg_buffer *buffer,
			     struct hbg_tx_desc *tx_desc)
{
	u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
	u32 word0 = 0;

	word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
	word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
	if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
		word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
	}

	tx_desc->word0 = word0;
	tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
				    buffer->skb->len);
	tx_desc->word2 = buffer->skb_dma;
	tx_desc->word3 = buffer->state_dma;
}

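/* The TX ring is a single-producer/single-consumer queue: hbg_net_start_xmit()
 * is the only writer of ntu and hbg_napi_tx_recycle() is the only writer of
 * ntc, so the two contexts synchronize solely through the acquire/release
 * pairs noted below rather than a lock.
 */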
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hbg_ring *ring = netdev_get_tx_ring(netdev);
	struct hbg_priv *priv = netdev_priv(netdev);
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hbg_napi_tx_recycle(), called from the tx interrupt handling path.
	 */
	u32 ntc = smp_load_acquire(&ring->ntc);
	struct hbg_buffer *buffer;
	struct hbg_tx_desc tx_desc;
	u32 ntu = ring->ntu;

	if (unlikely(!skb->len ||
		     skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	if (!netif_subqueue_maybe_stop(netdev, 0,
				       hbg_queue_left_num(ntc, ntu, ring),
				       HBG_TX_STOP_THRS, HBG_TX_START_THRS))
		return NETDEV_TX_BUSY;

	buffer = &ring->queue[ntu];
	buffer->skb = skb;
	buffer->skb_len = skb->len;
	if (unlikely(hbg_dma_map(buffer))) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	buffer->state = HBG_TX_STATE_START;
	hbg_init_tx_desc(buffer, &tx_desc);
	hbg_hw_set_tx_desc(priv, &tx_desc);

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hbg_napi_tx_recycle(), called from the tx interrupt handling path.
	 */
	smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
	dev_sw_netstats_tx_add(netdev, 1, skb->len);
	return NETDEV_TX_OK;
}

static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
{
	if (unlikely(!buffer->skb))
		return;

	dev_kfree_skb_any(buffer->skb);
	buffer->skb = NULL;
}

static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
{
	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
	struct hbg_priv *priv = buffer->priv;

	buffer->skb = netdev_alloc_skb(priv->netdev, len);
	if (unlikely(!buffer->skb))
		return -ENOMEM;

	buffer->skb_len = len;
	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
	return 0;
}

static void hbg_buffer_free(struct hbg_buffer *buffer)
{
	hbg_dma_unmap(buffer);
	hbg_buffer_free_skb(buffer);
}

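/* TX completion: write-back is enabled in the descriptor (HBG_TX_DESC_W0_WB_B)
 * and word3 carries buffer->state_dma, so the hardware is expected to update
 * buffer->state to HBG_TX_STATE_COMPLETE once the frame has been sent.
 * hbg_napi_tx_recycle() polls that state to reclaim buffers.
 */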
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hbg_net_start_xmit(), called from the xmit path.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need to do cleanup even if budget is 0.
	 * Per the NAPI documentation, budget only applies to Rx,
	 * so we hardcode the amount of work Tx NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure HW write desc complete */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		hbg_buffer_free(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hbg_net_start_xmit(), called from the xmit path.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}

static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
	struct hbg_ring *ring = &priv->rx_ring;
	struct hbg_buffer *buffer;
	int ret;

	if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
		return 0;

	buffer = &ring->queue[ring->ntu];
	ret = hbg_buffer_alloc_skb(buffer);
	if (unlikely(ret))
		return ret;

	ret = hbg_dma_map(buffer);
	if (unlikely(ret)) {
		hbg_buffer_free_skb(buffer);
		return ret;
	}

	hbg_hw_fill_buffer(priv, buffer->skb_dma);
	hbg_queue_move_next(ntu, ring);
	return 0;
}

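/* On RX, the hardware places a struct hbg_rx_desc at the head of the buffer,
 * in front of the frame data; a non-zero packet length in word2 signals that
 * the descriptor (and the packet behind it) is complete.
 */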
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
				  struct hbg_buffer *buffer)
{
	struct hbg_rx_desc *rx_desc;

	/* make sure HW write desc complete */
	dma_rmb();

	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
				buffer->skb_len, DMA_FROM_DEVICE);

	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}

static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	struct hbg_priv *priv = ring->priv;
	struct hbg_rx_desc *rx_desc;
	struct hbg_buffer *buffer;
	int packet_done = 0;
	u32 pkt_len;

	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
			break;

		buffer = &ring->queue[ring->ntc];
		if (unlikely(!buffer->skb))
			goto next_buffer;

		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
			break;
		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);

		hbg_dma_unmap(buffer);

		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
		skb_put(buffer->skb, pkt_len);
		buffer->skb->protocol = eth_type_trans(buffer->skb,
						       priv->netdev);

		dev_sw_netstats_rx_add(priv->netdev, pkt_len);
		napi_gro_receive(napi, buffer->skb);
		buffer->skb = NULL;

next_buffer:
		hbg_rx_fill_one_buffer(priv);
		hbg_queue_move_next(ntc, ring);
		packet_done++;
	}

	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

	return packet_done;
}

static void hbg_ring_uninit(struct hbg_ring *ring)
{
	struct hbg_buffer *buffer;
	u32 i;

	if (!ring->queue)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	for (i = 0; i < ring->len; i++) {
		buffer = &ring->queue[i];
		hbg_buffer_free(buffer);
		buffer->ring = NULL;
		buffer->priv = NULL;
	}

	dma_free_coherent(&ring->priv->pdev->dev,
			  ring->len * sizeof(*ring->queue),
			  ring->queue, ring->queue_dma);
	ring->queue = NULL;
	ring->queue_dma = 0;
	ring->len = 0;
	ring->priv = NULL;
}

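/* The buffer descriptor array is a coherent DMA allocation because the
 * hardware writes buffer state directly into it: buffer->state_dma points
 * into this allocation (see hbg_init_tx_desc()).
 */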
static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
			 int (*napi_poll)(struct napi_struct *, int),
			 enum hbg_dir dir)
{
	struct hbg_buffer *buffer;
	u32 i, len;

	len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
	ring->queue = dma_alloc_coherent(&priv->pdev->dev,
					 len * sizeof(*ring->queue),
					 &ring->queue_dma, GFP_KERNEL);
	if (!ring->queue)
		return -ENOMEM;

	for (i = 0; i < len; i++) {
		buffer = &ring->queue[i];
		buffer->skb_len = 0;
		buffer->dir = dir;
		buffer->ring = ring;
		buffer->priv = priv;
		buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
	}

	ring->dir = dir;
	ring->priv = priv;
	ring->ntc = 0;
	ring->ntu = 0;
	ring->len = len;

	if (dir == HBG_DIR_TX)
		netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
	else
		netif_napi_add(priv->netdev, &ring->napi, napi_poll);

	napi_enable(&ring->napi);
	return 0;
}

static int hbg_tx_ring_init(struct hbg_priv *priv)
{
	struct hbg_ring *tx_ring = &priv->tx_ring;

	if (!tx_ring->tout_log_buf)
		tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
						     HBG_TX_TIMEOUT_BUF_LEN,
						     GFP_KERNEL);

	if (!tx_ring->tout_log_buf)
		return -ENOMEM;

	return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
}

static int hbg_rx_ring_init(struct hbg_priv *priv)
{
	int ret;
	u32 i;

	ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
	if (ret)
		return ret;

	for (i = 0; i < priv->rx_ring.len - 1; i++) {
		ret = hbg_rx_fill_one_buffer(priv);
		if (ret) {
			hbg_ring_uninit(&priv->rx_ring);
			return ret;
		}
	}

	return 0;
}

int hbg_txrx_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_tx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init tx ring, ret = %d\n", ret);
		return ret;
	}

	ret = hbg_rx_ring_init(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to init rx ring, ret = %d\n", ret);
		hbg_ring_uninit(&priv->tx_ring);
	}

	return ret;
}

void hbg_txrx_uninit(struct hbg_priv *priv)
{
	hbg_ring_uninit(&priv->tx_ring);
	hbg_ring_uninit(&priv->rx_ring);
}