/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
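/*
 * Usage sketch for the iterator above (a minimal illustration, not code taken
 * from this driver): given any populated struct sge_uld_rxq_info *rxq_info and
 * an unsigned int idx, the macro visits every offload rx queue followed by
 * every concentrator queue of that ULD, e.g.
 *
 *	for_each_uldrxq(rxq_info, idx)
 *		memset(&rxq_info->uldrxq[idx].stats, 0,
 *		       sizeof(rxq_info->uldrxq[idx].stats));
 *
 * clears the statistics of all (nrxq + nciq) queues in one pass.
 */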
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}
/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;
	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}

	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}
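/*
 * Worked sizing sketch for the queue counts computed above (illustrative
 * numbers only, not taken from any configuration): with uld_info->nrxq = 10,
 * num_online_cpus() = 6 and a 4-port adapter running without MSI-X,
 * i = min(10, 6) = 6 and rxq_info->nrxq = roundup(6, 4) = 8, so every port
 * ends up with the same number (2) of offload rx queues. The nciq math
 * likewise rounds down to a multiple of nports, then clamps to at least one
 * concentrator queue per port.
 */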
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}
static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];
	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}
	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}
void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}
/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}
/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct adapter *adap;
	int ret = 0;

	if (type >= CXGB4_ULD_MAX)
		return;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add)
			goto free_irq;
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		ret = uld_attach(adap, type);
		if (ret)
			goto free_txq;
		continue;
free_txq:
		release_sge_txq_uld(adap, type);
free_irq:
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
free_rxq:
		free_sge_queues_uld(adap, type);
free_queues:
		free_queues_uld(adap, type);
out:
		dev_warn(adap->pdev_dev,
			 "ULD registration failed for uld type %d\n", type);
	}
	mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);
/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
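/*
 * Registration sketch: a ULD module (iw_cxgb4, cxgb4i, chcr, ...) typically
 * fills a cxgb4_uld_info structure and calls the two exported entry points
 * above from its module init/exit paths. The field names follow cxgb4_uld.h;
 * the callbacks and the queue sizes below are hypothetical placeholders, not
 * values taken from any in-tree ULD.
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lld);
 *	static int my_uld_rx_handler(void *handle, const __be64 *rsp,
 *				     const struct pkt_gl *gl);
 *	static int my_uld_state_change(void *handle, enum cxgb4_state state);
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.nrxq = 8,
 *		.ntxq = 8,
 *		.rxq_size = 1024,
 *		.lro = false,
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	static int __init my_uld_init(void)
 *	{
 *		cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *		return 0;
 *	}
 *
 *	static void __exit my_uld_exit(void)
 *	{
 *		cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *	}
 */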