/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
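
/*
 * MSI-X vectors for ULD response queues come from a shared per-adapter
 * bitmap (adap->msix_bmap_ulds).  The two helpers below hand out and
 * return an index from that bitmap under its spinlock.
 */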

static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
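
/*
 * Allocate the ULD ingress queues: the ordinary rx queues first, then the
 * concentrator (CIQ) queues, with each group spread evenly across the
 * ports.  With MSI-X every queue takes a vector index from the ULD bitmap
 * above; otherwise all queues are steered to the non-data interrupt queue.
 */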

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}
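
/*
 * Set up the ingress queues for one ULD type.  For RDMA on a fully
 * initialised adapter this also tells firmware to route control queue
 * completions to the per-port RDMA response queues.
 */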

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
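
/*
 * Decide how many rx and concentrator queues a ULD gets: capped by the
 * per-ULD queue budget (or MAX_OFLD_QSETS without MSI-X), limited to the
 * number of online CPUs and rounded to a multiple of the port count so
 * each port gets an equal share.
 */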

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else  {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
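
/*
 * Request one MSI-X interrupt per ULD rx queue, using the vector reserved
 * in the bitmap when the queue was allocated.  On failure the vectors
 * already requested are handed back.
 */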

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}
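
/*
 * ULD tx queue sets are shared: all offload ULDs use the common
 * CXGB4_TX_OFLD set, so txq_info is reference counted and only freed when
 * the last user drops its reference.
 */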

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];
	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}
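
/*
 * Fill in the cxgb4_lld_info block handed to a ULD at attach time: the
 * hardware resources, register locations and adapter capabilities the
 * upper-layer driver needs to operate.
 */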

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
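
/*
 * Illustrative only (not part of this driver): a ULD module such as
 * iw_cxgb4, cxgb4i or chcr pairs cxgb4_register_uld() at module init with
 * cxgb4_unregister_uld() at module exit.  The "demo_*" names below are
 * hypothetical and only sketch the expected shape of a registration:
 *
 *	static void *demo_add(const struct cxgb4_lld_info *lld)
 *	{
 *		return demo_alloc_adapter_state(lld);
 *	}
 *
 *	static struct cxgb4_uld_info demo_uld_info = {
 *		.name = "demo",
 *		.nrxq = 4,
 *		.ntxq = 4,
 *		.rxq_size = 1024,
 *		.add = demo_add,
 *		.rx_handler = demo_rx_handler,
 *		.state_change = demo_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_RDMA, &demo_uld_info);
 */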

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);