// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */
#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
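/* Per-vector context: one Tx/Rx ring pair per traffic class, the NAPI
 * instance that services them, and the IRQ/CPU placement parameters.
 */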
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};
#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1
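/* NAPI poll callback: services the Tx and Rx rings owned by this vector and
 * re-enables the vector's interrupt once the budget is not exhausted.
 */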
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		if (self->aq_hw_ops->hw_ring_tx_head_update) {
			err = self->aq_hw_ops->hw_ring_tx_head_update(
						self->aq_hw,
						&ring[AQ_VEC_TX_ID]);
			if (err < 0)
				goto err_exit;
		}
		if (ring[AQ_VEC_TX_ID].sw_head !=
		    ring[AQ_VEC_TX_ID].hw_head) {
			was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
			aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
		}
		err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
							  &ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
		if (ring[AQ_VEC_RX_ID].sw_head !=
		    ring[AQ_VEC_RX_ID].hw_head) {
			err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
					       napi,
					       &work_done,
					       budget - work_done);
			if (err < 0)
				goto err_exit;
			sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

			err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;
			err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
			if (err < 0)
				goto err_exit;
		}
	}

err_exit:
	if (!was_tx_cleaned)
		work_done = budget;
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
	}

	return work_done;
}
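/* Allocates a vector with one Tx/Rx ring pair per traffic class, registers
 * its NAPI context and pins the vector to a CPU derived from the RSS config.
 */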
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	struct aq_vec_s *self = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}

	return self;
}
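/* Binds the vector to the hardware ops/handle and initializes each ring pair
 * in software and in hardware, pre-filling the Rx rings with buffers.
 */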
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;
		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;
		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}
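/* Starts the hardware Tx/Rx rings of this vector and enables its NAPI
 * context.
 */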
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}
	napi_enable(&self->napi);

err_exit:
	return err;
}
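/* Stops the hardware Tx/Rx rings of this vector and disables its NAPI
 * context.
 */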
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}
	napi_disable(&self->napi);
}
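/* Releases ring buffers: drains the Tx rings and de-initializes the Rx
 * rings of this vector.
 */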
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}
}
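/* Frees the ring descriptor memory, removes the NAPI context and releases
 * the vector itself.
 */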
void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;
	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}
	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}
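/* Per-vector interrupt handler: schedules NAPI polling for this vector. */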
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
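/* Legacy (shared line) interrupt handler: reads the interrupt status, masks
 * this vector and kicks NAPI when its bit is pending, otherwise re-enables
 * interrupts and reports the IRQ as not ours.
 */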
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
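/* Returns the CPU affinity mask used when requesting this vector's IRQ. */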
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}
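/* Accumulates the per-ring Rx/Tx statistics of this vector into the
 * caller-provided counters.
 */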
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;
	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;
		stats_rx->pg_losts += rx->pg_losts;
		stats_rx->pg_flips += rx->pg_flips;
		stats_rx->pg_reuses += rx->pg_reuses;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}
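/* Copies the vector's software statistics into the ethtool data array, in
 * the order expected by aq_ethtool_queue_stat_names, and reports the number
 * of entries written via p_count.
 */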
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;
	unsigned int count = 0U;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure
	 */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}