// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"
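
/* Validate the requested TC-MQPRIO configuration: only full hardware
 * offload in channel mode with a bandwidth rate shaper is accepted, the
 * number of traffic classes must fit the available hardware scheduler
 * classes, queue ranges must not overlap across classes, and the
 * aggregate min/max rates must not exceed the current link speed.
 */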
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 speed, qcount = 0, qoffset = 0;
	u32 start_a, start_b, end_a, end_b;
	int ret;
	u8 i, j;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		start_a = mqprio->qopt.offset[i];
		end_a = start_a + mqprio->qopt.count[i] - 1;
		for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
			start_b = mqprio->qopt.offset[j];
			end_b = start_b + mqprio->qopt.count[j] - 1;

			/* If queue count is 0, then the traffic
			 * belonging to this class will not use
			 * ETHOFLD queues. So, no need to validate
			 * further.
			 */
			if (!mqprio->qopt.count[i])
				break;

			if (!mqprio->qopt.count[j])
				continue;

			if (max_t(u32, start_a, start_b) <=
			    min_t(u32, end_a, end_b)) {
				netdev_err(dev,
					   "Queues can't overlap across tc\n");
				return -EINVAL;
			}
		}

		/* Convert byte per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}
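
/* Initialize a software ETHOFLD Tx queue: allocate its descriptor ring,
 * record the EOTID and hardware queue mapping, and set up the restart
 * tasklet.
 */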
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct tx_sw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
	return 0;
}
static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}
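
/* Allocate this port's ETHOFLD hardware Rx/Tx queues and their IRQs.
 * The eohw_rxq/eohw_txq arrays live in the adapter's SGE and are shared
 * across ports, so they are allocated on first use and tracked with
 * tc_mqprio->refcnt.
 */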
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}

		refcount_set(&adap->tc_mqprio->refcnt, 1);
	} else {
		refcount_inc(&adap->tc_mqprio->refcnt);
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}
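
/* Release this port's ETHOFLD hardware queues and IRQs; the shared
 * queue arrays are freed once the last user drops its reference.
 */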
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}
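
/* Allocate one hardware scheduler class (flow mode, class rate limiting,
 * absolute bits-per-second rates) per traffic class and remember the
 * tc-to-hardware-class mapping.
 */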
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		/* Request larger burst buffer for smaller MTU, so
		 * that hardware can work on more data per burst
		 * cycle.
		 */
		if (dev->mtu <= ETH_DATA_LEN)
			p.u.params.burstsize = 8 * dev->mtu;

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}

static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}
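
/* Bind an ETHOFLD Tx queue's flow to a hardware scheduler class and wait
 * for the FLOWC reply completion before returning.
 */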
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}
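
/* Unbind an ETHOFLD Tx queue's flow from its hardware scheduler class. */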
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}
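
/* Set up ETHOFLD resources for the requested configuration: hardware
 * queues, an EOTID and software Tx queue for every queue in every traffic
 * class, scheduler class bindings, and the stack's tc-to-queue mapping.
 */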
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u32 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through default NIC
	 * queues; instead of ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}
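
/* Tear down an active MQPRIO offload on this port: reset the stack's tc
 * mapping, unbind and free all software/hardware queues and EOTIDs, and
 * release the traffic classes.
 */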
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u32 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}
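
/* Main TC-MQPRIO offload entry point. Any existing configuration is
 * always torn down first; a new one is applied only when traffic
 * classes are requested.
 */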
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	mutex_lock(&adap->tc_mqprio->mqprio_mutex);

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret)
		cxgb4_mqprio_free_tc(dev);

out:
	if (needs_bring_up) {
		netif_tx_start_all_queues(dev);
		netif_carrier_on(dev);
	}

	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
	return ret;
}
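
/* Disable MQPRIO offload on every port that has it configured, e.g. when
 * the adapter is being taken down.
 */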
void cxgb4_mqprio_stop_offload(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct net_device *dev;
	u8 i;

	if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
		return;

	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
	for_each_port(adap, i) {
		dev = adap->port[i];
		if (!dev)
			continue;

		tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
		if (!tc_port_mqprio->mqprio.qopt.num_tc)
			continue;

		cxgb4_mqprio_disable_offload(dev);
	}
	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
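
/* Allocate the adapter-wide TC-MQPRIO state, including per-port software
 * Tx queue arrays sized by the number of available EOTIDs.
 */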
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	mutex_init(&tc_mqprio->mqprio_mutex);

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}

void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		mutex_lock(&adap->tc_mqprio->mqprio_mutex);
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
		kfree(adap->tc_mqprio);
	}
}