// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"

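/* Validate the requested TC-MQPRIO offload parameters: only full TC
 * hardware offload in channel mode with a bandwidth rate shaper is
 * supported, the number of traffic classes must fit within the
 * available hardware scheduler classes, queue ranges must not overlap
 * across traffic classes, and the aggregate min/max rates must not
 * exceed the current link speed.
 */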
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 speed, qcount = 0, qoffset = 0;
	u32 start_a, start_b, end_a, end_b;
	int ret;
	u8 i, j;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		start_a = mqprio->qopt.offset[i];
		end_a = start_a + mqprio->qopt.count[i] - 1;
		for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
			start_b = mqprio->qopt.offset[j];
			end_b = start_b + mqprio->qopt.count[j] - 1;

			/* If queue count is 0, then the traffic
			 * belonging to this class will not use
			 * ETHOFLD queues. So, no need to validate
			 * further.
			 */
			if (!mqprio->qopt.count[i])
				break;

			if (!mqprio->qopt.count[j])
				continue;

			if (max_t(u32, start_a, start_b) <=
			    min_t(u32, end_a, end_b)) {
				netdev_err(dev,
					   "Queues can't overlap across tc\n");
				return -EINVAL;
			}
		}

		/* Convert byte per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}

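/* Initialize a software ETHOFLD Tx queue: allocate its descriptor ring
 * and associate it with the given EOTID and hardware queue.
 */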
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct tx_sw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}

static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}

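/* Allocate the ETHOFLD hardware Rx/Tx queues used by this port. The
 * eohw_rxq/eohw_txq arrays are shared by all ports, so they are
 * allocated only for the first user and tracked with a refcount.
 */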
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}

		refcount_set(&adap->tc_mqprio->refcnt, 1);
	} else {
		refcount_inc(&adap->tc_mqprio->refcnt);
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}

static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}

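/* Allocate one hardware scheduler class per requested traffic class and
 * program its min/max rate, converted from bytes/sec to Kbps.
 */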
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}

static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}

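/* Bind an ETHOFLD Tx queue's EOTID to a hardware traffic class and wait
 * for the firmware FLOWC reply to complete.
 */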
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}

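/* Bring up the MQPRIO offload: allocate hardware queues, set up one
 * EOTID-backed software Tx queue per requested queue, bind each to its
 * traffic class, and expose the resulting TC queue map to the stack.
 */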
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u32 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through default NIC
	 * queues; instead of ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}

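/* Tear down an active MQPRIO offload: restore the default queue map,
 * unbind and free every EOTID, and release the hardware queues and
 * traffic classes.
 */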
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u32 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}

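/* Entry point for tc-mqprio offload requests: validate the request,
 * tear down any existing configuration, then apply the new one. A
 * request with num_tc == 0 simply clears the current offload.
 */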
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret)
		cxgb4_mqprio_free_tc(dev);

out:
	if (needs_bring_up)
		cxgb_open(dev);

	return ret;
}

int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}

void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		kfree(adap->tc_mqprio);
	}
}