/* aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */
19 #include <linux/netdevice.h>
/* NOTE(review): the members below appear to be the body of struct aq_vec_s;
 * the "struct aq_vec_s {" opener and closing brace are not visible in this
 * chunk (extraction-garbled), and the leading integer tokens look like
 * leaked original line numbers -- confirm against the full file.
 */
/* HW-specific callback table; set in aq_vec_init() and used everywhere below */
22 const struct aq_hw_ops
*aq_hw_ops
;
/* opaque HW context handed to every aq_hw_ops callback */
23 struct aq_hw_s
*aq_hw
;
/* owning NIC instance; set in aq_vec_alloc() */
24 struct aq_nic_s
*aq_nic
;
/* number of active Tx rings; loop bound for the per-TC ring pairs */
25 unsigned int tx_rings
;
/* number of active Rx rings */
26 unsigned int rx_rings
;
/* per-vector ring parameters: vec_idx, cpu, affinity_mask */
27 struct aq_ring_param_s aq_ring_param
;
/* NAPI context serviced by aq_vec_poll() */
28 struct napi_struct napi
;
/* one [Tx, Rx] ring pair per traffic class */
29 struct aq_ring_s ring
[AQ_CFG_TCS_MAX
][2];
/* second index of the ring[][] pair: Tx half vs Rx half */
32 #define AQ_VEC_TX_ID 0
33 #define AQ_VEC_RX_ID 1
/* NAPI poll callback for one interrupt vector.
 *
 * For each Tx/Rx ring pair of this vector it: refreshes the Tx head from
 * HW (optional hw_ring_tx_head_update op), completes sent Tx descriptors,
 * receives/cleans Rx descriptors, then refills the Rx ring in SW and HW.
 * When fewer than @budget packets were processed, the NAPI context is
 * completed and this vector's IRQ bit is re-enabled.
 *
 * NOTE(review): this chunk is extraction-damaged -- declarations of
 * i/err/work_done, several error checks, the err_exit path and the final
 * return are missing here; confirm against the full file.
 */
35 static int aq_vec_poll(struct napi_struct
*napi
, int budget
)
/* recover the owning vector from the embedded napi member */
37 struct aq_vec_s
*self
= container_of(napi
, struct aq_vec_s
, napi
);
38 unsigned int sw_tail_old
= 0U;
39 struct aq_ring_s
*ring
= NULL
;
40 bool was_tx_cleaned
= true;
/* iterate the vector's ring pairs; ring points at self->ring[i][] */
48 for (i
= 0U, ring
= self
->ring
[0];
49 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
/* optional op: sync the Tx hw_head from hardware before cleaning */
50 if (self
->aq_hw_ops
->hw_ring_tx_head_update
) {
51 err
= self
->aq_hw_ops
->hw_ring_tx_head_update(
/* complete Tx descriptors the HW has consumed (sw_head behind hw_head) */
58 if (ring
[AQ_VEC_TX_ID
].sw_head
!=
59 ring
[AQ_VEC_TX_ID
].hw_head
) {
60 was_tx_cleaned
= aq_ring_tx_clean(&ring
[AQ_VEC_TX_ID
]);
/* may restart a stopped netdev Tx queue after cleaning */
61 aq_ring_update_queue_state(&ring
[AQ_VEC_TX_ID
]);
/* pull newly received descriptors from HW into the Rx ring */
64 err
= self
->aq_hw_ops
->hw_ring_rx_receive(self
->aq_hw
,
/* process received Rx descriptors, then refill */
69 if (ring
[AQ_VEC_RX_ID
].sw_head
!=
70 ring
[AQ_VEC_RX_ID
].hw_head
) {
71 err
= aq_ring_rx_clean(&ring
[AQ_VEC_RX_ID
],
/* remember sw_tail before SW refill so HW is told where new bufs start */
78 sw_tail_old
= ring
[AQ_VEC_RX_ID
].sw_tail
;
80 err
= aq_ring_rx_fill(&ring
[AQ_VEC_RX_ID
]);
/* hand the refilled buffers to hardware starting at the old tail */
84 err
= self
->aq_hw_ops
->hw_ring_rx_fill(
86 &ring
[AQ_VEC_RX_ID
], sw_tail_old
);
/* budget not exhausted: finish NAPI and unmask this vector's IRQ bit */
95 if (work_done
< budget
) {
96 napi_complete_done(napi
, work_done
);
97 self
->aq_hw_ops
->hw_irq_enable(self
->aq_hw
,
98 1U << self
->aq_ring_param
.vec_idx
);
/* Allocate and populate one vector: the aq_vec_s itself, its NAPI context,
 * its CPU affinity hint, and one Tx+Rx ring pair per traffic class.
 *
 * @aq_nic: owning NIC; @idx: vector index; @aq_nic_cfg: config providing
 * tcs count and RSS base CPU.  Presumably returns the new vector or NULL
 * on failure -- the error/return path is missing from this chunk; confirm.
 *
 * NOTE(review): allocation-failure checks and the middle argument of
 * AQ_NIC_TCVEC2RING() are among the lines lost to extraction damage.
 */
105 struct aq_vec_s
*aq_vec_alloc(struct aq_nic_s
*aq_nic
, unsigned int idx
,
106 struct aq_nic_cfg_s
*aq_nic_cfg
)
108 struct aq_vec_s
*self
= NULL
;
109 struct aq_ring_s
*ring
= NULL
;
113 self
= kzalloc(sizeof(*self
), GFP_KERNEL
);
119 self
->aq_nic
= aq_nic
;
120 self
->aq_ring_param
.vec_idx
= idx
;
/* spread vectors across CPUs starting from the RSS base CPU */
121 self
->aq_ring_param
.cpu
=
122 idx
+ aq_nic_cfg
->aq_rss
.base_cpu_number
;
124 cpumask_set_cpu(self
->aq_ring_param
.cpu
,
125 &self
->aq_ring_param
.affinity_mask
);
/* register aq_vec_poll as this vector's NAPI handler */
130 netif_napi_add(aq_nic_get_ndev(aq_nic
), &self
->napi
,
131 aq_vec_poll
, AQ_CFG_NAPI_WEIGHT
);
/* one Tx+Rx ring pair per configured traffic class */
133 for (i
= 0; i
< aq_nic_cfg
->tcs
; ++i
) {
134 unsigned int idx_ring
= AQ_NIC_TCVEC2RING(self
->nic
,
136 self
->aq_ring_param
.vec_idx
);
138 ring
= aq_ring_tx_alloc(&self
->ring
[i
][AQ_VEC_TX_ID
], aq_nic
,
139 idx_ring
, aq_nic_cfg
);
/* publish the Tx ring so the NIC layer can map queue -> ring */
147 aq_nic_set_tx_ring(aq_nic
, idx_ring
, ring
);
149 ring
= aq_ring_rx_alloc(&self
->ring
[i
][AQ_VEC_RX_ID
], aq_nic
,
150 idx_ring
, aq_nic_cfg
);
/* Initialize the vector's rings in SW and HW and pre-fill the Rx rings.
 *
 * Stores @aq_hw_ops on the vector, then for each ring pair: inits the Tx
 * ring (SW then HW), inits the Rx ring (SW then HW), fills the Rx ring
 * with buffers and hands them all to HW (sw_tail_old = 0U on first fill).
 *
 * NOTE(review): err checks between steps, the aq_hw assignment, and the
 * return are missing in this garbled chunk; presumably returns 0 or a
 * negative errno -- confirm against the full file.
 */
167 int aq_vec_init(struct aq_vec_s
*self
, const struct aq_hw_ops
*aq_hw_ops
,
168 struct aq_hw_s
*aq_hw
)
170 struct aq_ring_s
*ring
= NULL
;
174 self
->aq_hw_ops
= aq_hw_ops
;
177 for (i
= 0U, ring
= self
->ring
[0];
178 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
/* SW init then HW init of the Tx half */
179 err
= aq_ring_init(&ring
[AQ_VEC_TX_ID
]);
183 err
= self
->aq_hw_ops
->hw_ring_tx_init(self
->aq_hw
,
185 &self
->aq_ring_param
);
/* SW init then HW init of the Rx half */
189 err
= aq_ring_init(&ring
[AQ_VEC_RX_ID
]);
193 err
= self
->aq_hw_ops
->hw_ring_rx_init(self
->aq_hw
,
195 &self
->aq_ring_param
);
/* populate Rx buffers and give the whole ring to HW from index 0 */
199 err
= aq_ring_rx_fill(&ring
[AQ_VEC_RX_ID
]);
203 err
= self
->aq_hw_ops
->hw_ring_rx_fill(self
->aq_hw
,
204 &ring
[AQ_VEC_RX_ID
], 0U);
/* Start the vector: enable every Tx and Rx ring in HW, then enable NAPI.
 *
 * NOTE(review): err checks and the return statement are missing from this
 * chunk; presumably returns 0 or a negative errno -- confirm.
 */
213 int aq_vec_start(struct aq_vec_s
*self
)
215 struct aq_ring_s
*ring
= NULL
;
219 for (i
= 0U, ring
= self
->ring
[0];
220 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
221 err
= self
->aq_hw_ops
->hw_ring_tx_start(self
->aq_hw
,
222 &ring
[AQ_VEC_TX_ID
]);
226 err
= self
->aq_hw_ops
->hw_ring_rx_start(self
->aq_hw
,
227 &ring
[AQ_VEC_RX_ID
]);
/* rings running in HW -- now allow aq_vec_poll to be scheduled */
232 napi_enable(&self
->napi
);
/* Stop the vector: halt every Tx and Rx ring in HW, then disable NAPI
 * (napi_disable waits for any in-flight poll to finish).
 */
238 void aq_vec_stop(struct aq_vec_s
*self
)
240 struct aq_ring_s
*ring
= NULL
;
243 for (i
= 0U, ring
= self
->ring
[0];
244 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
245 self
->aq_hw_ops
->hw_ring_tx_stop(self
->aq_hw
,
246 &ring
[AQ_VEC_TX_ID
]);
248 self
->aq_hw_ops
->hw_ring_rx_stop(self
->aq_hw
,
249 &ring
[AQ_VEC_RX_ID
]);
252 napi_disable(&self
->napi
);
/* De-initialize the vector's rings: reclaim pending Tx completions and
 * release Rx buffers for every ring pair.  Counterpart of aq_vec_init().
 * NOTE(review): a NULL-check / goto guard likely precedes the loop in the
 * full file; those lines are missing from this chunk.
 */
255 void aq_vec_deinit(struct aq_vec_s
*self
)
257 struct aq_ring_s
*ring
= NULL
;
263 for (i
= 0U, ring
= self
->ring
[0];
264 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
265 aq_ring_tx_clean(&ring
[AQ_VEC_TX_ID
]);
266 aq_ring_rx_deinit(&ring
[AQ_VEC_RX_ID
]);
/* Free the vector's resources: release every Tx/Rx ring, unregister the
 * NAPI context.  Counterpart of aq_vec_alloc(); the kfree(self) that
 * presumably follows is outside this garbled chunk -- confirm.
 */
271 void aq_vec_free(struct aq_vec_s
*self
)
273 struct aq_ring_s
*ring
= NULL
;
279 for (i
= 0U, ring
= self
->ring
[0];
280 self
->tx_rings
> i
; ++i
, ring
= self
->ring
[i
]) {
281 aq_ring_free(&ring
[AQ_VEC_TX_ID
]);
282 aq_ring_free(&ring
[AQ_VEC_RX_ID
]);
285 netif_napi_del(&self
->napi
);
/* MSI/MSI-X interrupt handler: @private is the aq_vec_s registered with
 * request_irq.  Schedules NAPI so aq_vec_poll runs in softirq context.
 * NOTE(review): the declaration/assignment of err (likely a !self guard)
 * is missing from this chunk; confirm.
 */
292 irqreturn_t
aq_vec_isr(int irq
, void *private)
294 struct aq_vec_s
*self
= private;
301 napi_schedule(&self
->napi
);
/* IRQ_HANDLED unless the (missing) validation set err negative */
304 return err
>= 0 ? IRQ_HANDLED
: IRQ_NONE
;
/* Legacy (INTx, shared-line) interrupt handler.  Reads the HW interrupt
 * mask; when this vector's bit is pending (the test itself is among the
 * missing lines -- confirm), masks the vector's IRQ bit and schedules
 * NAPI; otherwise re-enables the line (1U) so shared interrupts keep
 * flowing.
 */
307 irqreturn_t
aq_vec_isr_legacy(int irq
, void *private)
309 struct aq_vec_s
*self
= private;
/* query pending interrupt cause bits from hardware */
317 err
= self
->aq_hw_ops
->hw_irq_read(self
->aq_hw
, &irq_mask
);
/* mask this vector's bit until aq_vec_poll re-enables it */
322 self
->aq_hw_ops
->hw_irq_disable(self
->aq_hw
,
323 1U << self
->aq_ring_param
.vec_idx
);
324 napi_schedule(&self
->napi
);
/* not ours: re-enable the shared line */
326 self
->aq_hw_ops
->hw_irq_enable(self
->aq_hw
, 1U);
331 return err
>= 0 ? IRQ_HANDLED
: IRQ_NONE
;
/* Return the CPU affinity mask populated in aq_vec_alloc(), for use as an
 * IRQ affinity hint by the caller.
 */
334 cpumask_t
*aq_vec_get_affinity_mask(struct aq_vec_s
*self
)
336 return &self
->aq_ring_param
.affinity_mask
;
/* Accumulate this vector's per-ring statistics into the caller-supplied
 * @stats_rx / @stats_tx totals (callers must pre-zero them -- see
 * aq_vec_get_sw_stats()).  tx_rings bounds both halves because rings come
 * in paired Tx/Rx sets.
 */
339 void aq_vec_add_stats(struct aq_vec_s
*self
,
340 struct aq_ring_stats_rx_s
*stats_rx
,
341 struct aq_ring_stats_tx_s
*stats_tx
)
343 struct aq_ring_s
*ring
= NULL
;
346 for (r
= 0U, ring
= self
->ring
[0];
347 self
->tx_rings
> r
; ++r
, ring
= self
->ring
[r
]) {
348 struct aq_ring_stats_tx_s
*tx
= &ring
[AQ_VEC_TX_ID
].stats
.tx
;
349 struct aq_ring_stats_rx_s
*rx
= &ring
[AQ_VEC_RX_ID
].stats
.rx
;
/* Rx counters */
351 stats_rx
->packets
+= rx
->packets
;
352 stats_rx
->bytes
+= rx
->bytes
;
353 stats_rx
->errors
+= rx
->errors
;
354 stats_rx
->jumbo_packets
+= rx
->jumbo_packets
;
355 stats_rx
->lro_packets
+= rx
->lro_packets
;
/* Tx counters */
357 stats_tx
->packets
+= tx
->packets
;
358 stats_tx
->bytes
+= tx
->bytes
;
359 stats_tx
->errors
+= tx
->errors
;
360 stats_tx
->queue_restarts
+= tx
->queue_restarts
;
/* Export this vector's software statistics into the flat ethtool @data
 * array, in the fixed order expected by aq_ethtool_queue_stat_names:
 * rx.packets, tx.packets, tx.queue_restarts, rx.jumbo_packets,
 * rx.lro_packets, rx.errors.  @p_count presumably receives the number of
 * entries written; the tail of the function (p_count update and return)
 * lies past the end of this chunk -- confirm against the full file.
 */
364 int aq_vec_get_sw_stats(struct aq_vec_s
*self
, u64
*data
, unsigned int *p_count
)
366 unsigned int count
= 0U;
367 struct aq_ring_stats_rx_s stats_rx
;
368 struct aq_ring_stats_tx_s stats_tx
;
/* totals must start at zero; aq_vec_add_stats only accumulates */
370 memset(&stats_rx
, 0U, sizeof(struct aq_ring_stats_rx_s
));
371 memset(&stats_tx
, 0U, sizeof(struct aq_ring_stats_tx_s
));
372 aq_vec_add_stats(self
, &stats_rx
, &stats_tx
);
374 /* This data should mimic aq_ethtool_queue_stat_names structure
376 data
[count
] += stats_rx
.packets
;
377 data
[++count
] += stats_tx
.packets
;
378 data
[++count
] += stats_tx
.queue_restarts
;
379 data
[++count
] += stats_rx
.jumbo_packets
;
380 data
[++count
] += stats_rx
.lro_packets
;
381 data
[++count
] += stats_rx
.errors
;