/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
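/*
 * Ring bookkeeping as used throughout this file: sw_tail is where the driver
 * posts new descriptors, hw_head is the last descriptor the hardware reports
 * as completed, and sw_head is the next completed descriptor the driver still
 * has to reap.  All three indices wrap at self->size.
 */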
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
                                       struct aq_nic_s *aq_nic)
{
        int err = 0;

        self->buff_ring =
                kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

        if (!self->buff_ring) {
                err = -ENOMEM;
                goto err_exit;
        }
        self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
                                           self->size * self->dx_size,
                                           &self->dx_ring_pa, GFP_KERNEL);
        if (!self->dx_ring) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->txds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->rxds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}
int aq_ring_init(struct aq_ring_s *self)
{
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
        return 0;
}

static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
                                       unsigned int t)
{
        return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}
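/*
 * A worked example for aq_ring_dx_in_range(): the open interval (h, t) may
 * wrap around the ring.  With h = 2, t = 8 only 2 < i < 8 is inside; with
 * h = 8, t = 2 the window wraps, so i = 10 and i = 1 are inside while
 * i = 5 is not.
 */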
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
        if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
                aq_ring_queue_stop(ring);
        else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
                aq_ring_queue_wake(ring);
}
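/*
 * The stop and wake thresholds above are deliberately different: the queue is
 * stopped once fewer descriptors remain than a maximally fragmented skb could
 * need, but it is only woken again above the larger restart threshold, which
 * keeps the queue from bouncing between the two states on every completion.
 */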
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
        struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

        if (__netif_subqueue_stopped(ndev, ring->idx)) {
                netif_wake_subqueue(ndev, ring->idx);
                ring->stats.tx.queue_restarts++;
        }
}
void aq_ring_queue_stop(struct aq_ring_s *ring)
{
        struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

        if (!__netif_subqueue_stopped(ndev, ring->idx))
                netif_stop_subqueue(ndev, ring->idx);
}
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
        struct device *dev = aq_nic_get_dev(self->aq_nic);
        unsigned int budget;

        for (budget = AQ_CFG_TX_CLEAN_BUDGET;
             budget && self->sw_head != self->hw_head; budget--) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                if (likely(buff->is_mapped)) {
                        if (unlikely(buff->is_sop)) {
                                if (!buff->is_eop &&
                                    buff->eop_index != 0xffffU &&
                                    (!aq_ring_dx_in_range(self->sw_head,
                                                          buff->eop_index,
                                                          self->hw_head)))
                                        break;

                                dma_unmap_single(dev, buff->pa, buff->len,
                                                 DMA_TO_DEVICE);
                        } else {
                                dma_unmap_page(dev, buff->pa, buff->len,
                                               DMA_TO_DEVICE);
                        }
                }

                if (unlikely(buff->is_eop)) {
                        ++self->stats.tx.packets;
                        self->stats.tx.bytes += buff->skb->len;

                        dev_kfree_skb_any(buff->skb);
                }
                buff->eop_index = 0xffffU;
                self->sw_head = aq_ring_next_dx(self, self->sw_head);
        }

        return !!budget;
}
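/*
 * Note on the SOP handling in aq_ring_tx_clean() above: a packet that spans
 * several descriptors is only reaped once its EOP descriptor (recorded in
 * buff->eop_index and reset to the 0xffff sentinel after cleaning) has been
 * completed by hardware; otherwise the loop breaks and the remainder is
 * cleaned on a later poll.
 */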
static void aq_rx_checksum(struct aq_ring_s *self,
                           struct aq_ring_buff_s *buff,
                           struct sk_buff *skb)
{
        if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
                return;

        if (unlikely(buff->is_cso_err)) {
                ++self->stats.rx.errors;
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }
        if (buff->is_ip_cso) {
                __skb_incr_checksum_unnecessary(skb);
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }

        if (buff->is_udp_cso || buff->is_tcp_cso)
                __skb_incr_checksum_unnecessary(skb);
}
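/*
 * aq_rx_checksum() above reports hardware-verified checksums through
 * CHECKSUM_UNNECESSARY: each __skb_incr_checksum_unnecessary() call records
 * one validated checksum level (IPv4 header, then TCP/UDP), while an offload
 * error sets CHECKSUM_NONE so the stack re-verifies the packet in software.
 */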
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
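/*
 * AQ_SKB_ALIGN is the tail room build_skb() needs for struct skb_shared_info
 * at the end of the receive buffer: a frame takes the zero-copy build_skb()
 * path in aq_ring_rx_clean() only when its length leaves at least that much
 * of the AQ_CFG_RX_FRAME_MAX buffer unused.
 */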
int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
                     int *work_done,
                     int budget)
{
        struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
        bool is_rsc_completed = true;
        int err = 0;

        for (; (self->sw_head != self->hw_head) && budget;
                self->sw_head = aq_ring_next_dx(self, self->sw_head),
                --budget, ++(*work_done)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
                struct sk_buff *skb = NULL;
                unsigned int next_ = 0U;
                unsigned int i = 0U;
                struct aq_ring_buff_s *buff_ = NULL;

                if (buff->is_error) {
                        __free_pages(buff->page, 0);
                        continue;
                }

                if (buff->is_cleaned)
                        continue;

                if (!buff->is_eop) {
                        for (next_ = buff->next,
                             buff_ = &self->buff_ring[next_]; true;
                             next_ = buff_->next,
                             buff_ = &self->buff_ring[next_]) {
                                is_rsc_completed =
                                        aq_ring_dx_in_range(self->sw_head,
                                                            next_,
                                                            self->hw_head);

                                if (unlikely(!is_rsc_completed)) {
                                        is_rsc_completed = false;
                                        break;
                                }

                                if (buff_->is_eop)
                                        break;
                        }

                        if (!is_rsc_completed) {
                                err = 0;
                                goto err_exit;
                        }
                }

                /* for single fragment packets use build_skb() */
                if (buff->is_eop &&
                    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
                        skb = build_skb(page_address(buff->page),
                                        AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }

                        skb_put(skb, buff->len);
                } else {
                        skb = netdev_alloc_skb(ndev, ETH_HLEN);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }
                        skb_put(skb, ETH_HLEN);
                        memcpy(skb->data, page_address(buff->page), ETH_HLEN);

                        skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
                                        buff->len - ETH_HLEN,
                                        SKB_TRUESIZE(buff->len - ETH_HLEN));

                        if (!buff->is_eop) {
                                for (i = 1U, next_ = buff->next,
                                     buff_ = &self->buff_ring[next_];
                                     true; next_ = buff_->next,
                                     buff_ = &self->buff_ring[next_], ++i) {
                                        skb_add_rx_frag(skb, i,
                                                        buff_->page, 0,
                                                        buff_->len,
                                                        SKB_TRUESIZE(buff->len -
                                                                     ETH_HLEN));
                                        buff_->is_cleaned = 1;

                                        if (buff_->is_eop)
                                                break;
                                }
                        }
                }

                skb->protocol = eth_type_trans(skb, ndev);

                aq_rx_checksum(self, buff, skb);

                skb_set_hash(skb, buff->rss_hash,
                             buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
                             PKT_HASH_TYPE_NONE);

                skb_record_rx_queue(skb, self->idx);

                ++self->stats.rx.packets;
                self->stats.rx.bytes += skb->len;

                napi_gro_receive(napi, skb);
        }

err_exit:
        return err;
}
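/*
 * Receive summary for aq_ring_rx_clean() above: a frame that fits a single
 * descriptor is wrapped zero-copy with build_skb(); larger or multi-descriptor
 * (RSC) frames get a small linear header copy plus page fragments, and a
 * partially received RSC chain is left on the ring until its EOP descriptor
 * completes.
 */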
int aq_ring_rx_fill(struct aq_ring_s *self)
{
        unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
                (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
        struct aq_ring_buff_s *buff = NULL;
        int err = 0;
        int i = 0;

        for (i = aq_ring_avail_dx(self); i--;
                self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
                buff = &self->buff_ring[self->sw_tail];

                buff->flags = 0U;
                buff->len = AQ_CFG_RX_FRAME_MAX;

                buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
                if (!buff->page) {
                        err = -ENOMEM;
                        goto err_exit;
                }

                buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
                                        buff->page, 0,
                                        AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
                        err = -ENOMEM;
                        goto err_exit;
                }

                buff = NULL;
        }

err_exit:
        if (err < 0) {
                if (buff && buff->page)
                        __free_pages(buff->page, 0);
        }

        return err;
}
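/*
 * pages_order in aq_ring_rx_fill() above is fls() of the rounded-up number of
 * pages per frame, minus one, i.e. the page order whose allocation holds one
 * receive frame: with AQ_CFG_RX_FRAME_MAX no larger than PAGE_SIZE the order
 * is 0, a two-page frame gives order 1, and so on for power-of-two frame
 * sizes.
 */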
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        for (; self->sw_head != self->sw_tail;
                self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
                               AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                __free_pages(buff->page, 0);
        }

err_exit:;
}
void aq_ring_free(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        kfree(self->buff_ring);

        if (self->dx_ring)
                dma_free_coherent(aq_nic_get_dev(self->aq_nic),
                                  self->size * self->dx_size, self->dx_ring,
                                  self->dx_ring_pa);

err_exit:;
}
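/*
 * aq_ring_free() above also serves the error paths of the *_alloc() helpers
 * in this file, so it must cope with a ring that was only partially set up;
 * hence the NULL check and the dx_ring guard before dma_free_coherent().
 */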