/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
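
/*
 * Illustrative note (added commentary, not part of the original macro): the
 * helper walks every response queue owned by a ULD, i.e. the rxq_info->nrxq
 * regular offload queues followed by the rxq_info->nciq concentrator queues,
 * exactly as the IRQ and enable/quiesce helpers below use it:
 *
 *	int idx;
 *
 *	for_each_uldrxq(rxq_info, idx)
 *		do_something(&rxq_info->uldrxq[idx].rspq);
 */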

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}
/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
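
/*
 * For illustration only (a hedged sketch, not code from this driver): a ULD
 * supplies the rx_handler that uldrx_handler() dispatches to through struct
 * cxgb4_uld_info.  The callback signature comes from cxgb4_uld.h; the
 * function name and its body below are hypothetical.  Returning 0 tells
 * uldrx_handler() the message was consumed; a non-zero return is counted as
 * a drop (stats.nomem).
 *
 *	static int example_uld_rx_handler(void *handle, const __be64 *rsp,
 *					  const struct pkt_gl *gl)
 *	{
 *		const struct rss_header *rss = (const void *)rsp;
 *
 *		pr_debug("example ULD: CPL opcode 0x%x, %s payload\n",
 *			 rss->opcode, gl ? "gathered" : "immediate");
 *		return 0;
 *	}
 */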

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret;

	ret = alloc_uld_rxqs(adap, rxq_info, lro);
	if (ret)
		return ret;

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}
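
/*
 * Design note (added commentary, not original driver text): the completion
 * queue routing programmed above is undone symmetrically at teardown time.
 * free_sge_queues_uld() below composes the very same FW_PARAMS_MNEM_DMAQ /
 * EQ_CMPLIQID_CTRL parameter for each control queue but writes cmplqid = 0,
 * so the firmware stops steering control-queue completions to the RDMA
 * response queues that are about to be freed:
 *
 *	u32 param, cmplqid = 0;
 *
 *	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
 *		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
 *		 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
 *	t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &cmplqid);
 */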

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}
	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	unsigned int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
	const struct tid_info *t = &adap->tids;

	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable ktls settings.
 */
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
	int ret = 0;
	u32 params =
		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
		FW_PARAMS_PARAM_Y_V(enable) |
		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	if (enable) {
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
			/* If ULD connections (tids/stids) are already in use,
			 * another ULD is active; refuse to enable kTLS.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_dbg(adap->pdev_dev,
					"ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* ktls settings already up, just increment refcount. */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
		}
	} else {
		/* return failure if refcount is already 0. */
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
			return -EINVAL;
		/* decrement refcount and test, if 0, disable ktls feature,
		 * else return command success.
		 */
		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

	return ret;
}
#endif
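
/*
 * Usage sketch (added commentary, hedged; the caller shown is an assumption
 * and not taken from this file): a kTLS consumer brackets device
 * enable/disable with matching calls, so the refcount tracks how many users
 * rely on the FW kTLS setting.  Only the first enable and the last disable
 * actually program the firmware parameter.
 *
 *	if (cxgb4_set_ktls_feature(adap, true))
 *		return -EINVAL;
 *	...
 *	cxgb4_set_ktls_feature(adap, false);
 */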

static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}

void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *uld_entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(uld_entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
					  &uld_entry->uld_info);
	mutex_unlock(&uld_mutex);
}

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *uld_entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
	if (!uld_entry)
		return;

	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	uld_entry->uld_type = type;
	list_add_tail(&uld_entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);
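
/*
 * Registration sketch (added for illustration; "example_uld" and its
 * callbacks are hypothetical, only the cxgb4_uld_info fields and the
 * register/unregister entry points come from cxgb4_uld.h): an upper-layer
 * driver typically registers once from its module init and unregisters on
 * exit.  cxgb4_register_uld() then calls back into .add for every adapter
 * already on adapter_list, and cxgb4_uld_enable() repeats that for adapters
 * probed later.  The ULD type shown is a placeholder.
 *
 *	static const struct cxgb4_uld_info example_uld_info = {
 *		.name = "example_uld",
 *		.nrxq = 4,
 *		.ntxq = 4,
 *		.rxq_size = 1024,
 *		.lro = false,
 *		.add = example_uld_add,
 *		.rx_handler = example_uld_rx_handler,
 *		.state_change = example_uld_state_change,
 *	};
 *
 *	static int __init example_uld_init(void)
 *	{
 *		cxgb4_register_uld(CXGB4_ULD_RDMA, &example_uld_info);
 *		return 0;
 *	}
 *	module_init(example_uld_init);
 *
 *	static void __exit example_uld_exit(void)
 *	{
 *		cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *	}
 *	module_exit(example_uld_exit);
 */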

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct cxgb4_uld_list *uld_entry, *tmp;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}

	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
		if (uld_entry->uld_type == type) {
			list_del(&uld_entry->list_node);
			kfree(uld_entry);
		}
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);