/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif
LIST_HEAD(cdev_list_head);
DEFINE_MUTEX(cdev_list_lock);
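
/* kref release callback: free the cxgbit_device once the last reference is dropped. */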
void _cxgbit_free_cdev(struct kref *kref)
{
	struct cxgbit_device *cdev;

	cdev = container_of(kref, struct cxgbit_device, kref);

	cxgbi_ppm_release(cdev2ppm(cdev));
	kfree(cdev);
}
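
/*
 * Cap the advertised maximum data segment length to what the hardware,
 * the ULP2 path and a single skb's frag list can actually carry.
 */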
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
	mdsl = min_t(u32, mdsl, 8192);
	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

	cdev->mdsl = mdsl;
}
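
/*
 * cxgb4 ULD "add" callback: allocate a cxgbit_device for a T5/T6 adapter,
 * set up DDP and the connection list, and link it on the global cdev list.
 */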
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbit_device *cdev;

	if (is_t4(lldi->adapter_type))
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	kref_init(&cdev->kref);
	spin_lock_init(&cdev->np_lock);

	cdev->lldi = *lldi;

	cxgbit_set_mdsl(cdev);

	if (cxgbit_ddp_init(cdev) < 0) {
		kfree(cdev);
		return ERR_PTR(-EINVAL);
	}

	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
		pr_info("cdev %s ddp init failed\n",
			pci_name(lldi->pdev));

	if (lldi->fw_vers >= 0x10d2b00)
		set_bit(CDEV_ISO_ENABLE, &cdev->flags);

	spin_lock_init(&cdev->cskq.lock);
	INIT_LIST_HEAD(&cdev->cskq.list);

	mutex_lock(&cdev_list_lock);
	list_add_tail(&cdev->list, &cdev_list_head);
	mutex_unlock(&cdev_list_lock);

	pr_info("cdev %s added for iSCSI target transport\n",
		pci_name(lldi->pdev));

	return cdev;
}
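
/*
 * Queue a zero-length skb on every connection's rx queue and wake the rx
 * thread so each connection notices that the device is going away.
 */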
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock_bh(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock_bh(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}
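
/* Remove an idle cdev from the global list; otherwise start closing its connections. */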
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
	bool free_cdev = false;

	spin_lock_bh(&cdev->cskq.lock);
	if (list_empty(&cdev->cskq.list))
		free_cdev = true;
	spin_unlock_bh(&cdev->cskq.lock);

	if (free_cdev) {
		mutex_lock(&cdev_list_lock);
		list_del(&cdev->list);
		mutex_unlock(&cdev_list_lock);

		cxgbit_put_cdev(cdev);
		return;
	}

	cxgbit_close_conn(cdev);
}
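
/* cxgb4 ULD state-change callback: track adapter UP/RECOVERY/DOWN/DETACH transitions. */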
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbit_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		set_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_START_RECOVERY:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		cxgbit_close_conn(cdev);
		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DETACH:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
		cxgbit_detach_cdev(cdev);
		break;
	default:
		pr_info("cdev %s unknown state %d.\n",
			pci_name(cdev->lldi.pdev), state);
		break;
	}

	return 0;
}
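
/*
 * Decode the DDP valid bits reported by the hardware: flag header/data
 * digest errors and mark payloads that were placed directly (DDP'd).
 */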
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
		      u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
	}

	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
	}

	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
		pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

	if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
	    (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
	}
}

static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

	cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

	pdu_cb->flags |= PDUCBF_RX_STATUS;
	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
	pdu_cb->pdulen = ntohs(cpl->len);

	if (pdu_cb->flags & PDUCBF_RX_HDR)
		pdu_cb->complete = true;

	lro_cb->pdu_totallen += pdu_cb->pdulen;
	lro_cb->complete = true;
	lro_cb->pdu_idx++;
}

static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
		  unsigned int offset)
{
	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
	u8 i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, skb_frag_idx + i,
				     gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	skb_shinfo(skb)->nr_frags += gl->nfrags;

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}
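
/*
 * Fold one CPL (iSCSI header, data or completion) from the free-list
 * gather list into the per-connection LRO skb and its per-PDU control block.
 */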
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	u32 len, offset;

	if (op == CPL_ISCSI_HDR) {
		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

		offset = sizeof(struct cpl_iscsi_hdr);
		pdu_cb->flags |= PDUCBF_RX_HDR;
		pdu_cb->seq = ntohl(cpl->seq);
		len = ntohs(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		lro_cb->complete = false;
	} else if (op == CPL_ISCSI_DATA) {
		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

		offset = sizeof(struct cpl_iscsi_data);
		pdu_cb->flags |= PDUCBF_RX_DATA;
		len = ntohs(cpl->len);
		pdu_cb->dlen = len;
		pdu_cb->doffset = lro_cb->offset;
		pdu_cb->nr_dfrags = gl->nfrags;
		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
		lro_cb->complete = false;
	} else {
		struct cpl_rx_iscsi_cmp *cpl;

		cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
		offset = sizeof(struct cpl_rx_iscsi_cmp);
		pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
		len = be16_to_cpu(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
		pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
		pdu_cb->pdulen = ntohs(cpl->len);

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
				      be32_to_cpu(cpl->ddpvld));

		if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
			pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
			pdu_cb->complete = true;
		} else if (pdu_cb->flags & PDUCBF_RX_DATA) {
			pdu_cb->complete = true;
		}

		lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
		lro_cb->complete = true;
		lro_cb->pdu_idx++;
	}

	cxgbit_copy_frags(skb, gl, offset);

	pdu_cb->frags += gl->nfrags;
	lro_cb->offset += len;
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}

static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
		    const __be64 *rsp, struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
	if (unlikely(!skb))
		return NULL;

	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

	lro_cb = cxgbit_skb_lro_cb(skb);
	cxgbit_get_csk(csk);
	lro_cb->csk = csk;

	return skb;
}

static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	bool wakeup_thread = false;

	spin_lock(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	if (skb_queue_len(&csk->rxq) == 1)
		wakeup_thread = true;
	spin_unlock(&csk->rxq.lock);

	if (wakeup_thread)
		wake_up(&csk->waitq);
}
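
/* Hand a completed LRO skb to the connection's rx queue and close the LRO session. */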
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_sock *csk = lro_cb->csk;

	csk->lro_skb = NULL;

	__skb_unlink(skb, &lro_mgr->lroq);
	cxgbit_queue_lro_skb(csk, skb);

	cxgbit_put_csk(csk);

	lro_mgr->lro_pkts++;
	lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&lro_mgr->lroq)))
		cxgbit_lro_flush(lro_mgr, skb);
}
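
/*
 * Aggregate an incoming iSCSI CPL into the connection's LRO skb, starting a
 * new LRO session or flushing the current one when it is full.
 */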
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
		   struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;

start_lro:
	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
		cxgbit_uld_lro_flush(lro_mgr);
		goto start_lro;
	}

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;

	csk->lro_skb = skb;

	__skb_queue_tail(&lro_mgr->lroq, skb);
	lro_mgr->lro_session_cnt++;

add_packet:
	skb = csk->lro_skb;
	lro_cb = cxgbit_skb_lro_cb(skb);

	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
		cxgbit_lro_flush(lro_mgr, skb);
		goto start_lro;
	}

	if (gl)
		cxgbit_lro_add_packet_gl(skb, op, gl);
	else
		cxgbit_lro_add_packet_rsp(skb, op, rsp);

	lro_mgr->lro_merged++;

	return 0;

out:
	return -1;
}
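
/*
 * Main cxgb4 rx callback: resolve the connection from the CPL's tid, run the
 * LRO path for iSCSI CPLs and dispatch everything else to the CPL handlers.
 */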
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	struct cxgbit_device *cdev = hndl;
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct cpl_tx_data *rpl = NULL;
	struct cxgbit_sock *csk = NULL;
	unsigned int tid = 0;
	struct sk_buff *skb;
	unsigned int op = *(u8 *)rsp;
	bool lro_flush = true;

	switch (op) {
	case CPL_ISCSI_HDR:
	case CPL_ISCSI_DATA:
	case CPL_RX_ISCSI_CMP:
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
	case CPL_CLOSE_CON_RPL:
	case CPL_ABORT_REQ_RSS:
	case CPL_SET_TCB_RPL:
	case CPL_RX_DATA:
		rpl = gl ? (struct cpl_tx_data *)gl->va :
			   (struct cpl_tx_data *)(rsp + 1);
		tid = GET_TID(rpl);
		csk = lookup_tid(lldi->tids, tid);
		break;
	default:
		break;
	}

	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!gl) {
		unsigned int len;

		if (op == CPL_RX_ISCSI_DDP) {
			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
						napi))
				return 0;
		}

		len = 64 - sizeof(struct rsp_ctrl) - 8;
		skb = napi_alloc_skb(napi, len);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				gl->va, be64_to_cpu(*rsp),
				get_unaligned_be64(gl->va),
				gl->tot_len);
			return 0;
		}

		if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
		    (op == CPL_RX_ISCSI_CMP)) {
			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
						napi))
				return 0;
		}

#define RX_PULL_LEN 128
		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_tx_data *)skb->data;
	cxgbit_skcb_rx_opcode(skb) = op;

	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, op, rpl->ot.opcode_tid,
		 ntohl(rpl->ot.opcode_tid), skb);

	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
		cxgbit_cplhandlers[op](cdev, skb);
	} else {
		pr_err("No handler for opcode 0x%x.\n", op);
		__kfree_skb(skb);
	}

	return 0;
nomem:
	pr_err("%s OOM bailing out.\n", __func__);
	return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
	struct dcb_app_type dcb_app;
	struct work_struct work;
};
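
/*
 * Walk the cdev's connection list and wake any connection whose local port
 * matches the changed iSCSI DCB app entry so it can move to the new priority.
 */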
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
			   u8 dcb_priority, u16 port_num)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	u16 local_port;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;

		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6;

			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);
		} else {
			struct sockaddr_in *sock_in;

			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);
		}

		if (local_port != port_num)
			continue;

		if (csk->dcb_priority == dcb_priority)
			continue;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}
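
/* Work item: translate a DCB app-table change into a per-port priority update. */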
static void cxgbit_dcb_workfn(struct work_struct *work)
{
	struct cxgbit_dcb_work *dcb_work;
	struct net_device *ndev;
	struct cxgbit_device *cdev = NULL;
	struct dcb_app_type *iscsi_app;
	u8 priority, port_id = 0xff;

	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
	iscsi_app = &dcb_work->dcb_app;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			goto out;

		priority = iscsi_app->app.priority;

	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			goto out;

		if (!iscsi_app->app.priority)
			goto out;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		goto out;
	}

	pr_debug("priority for ifid %d is %u\n",
		 iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		goto out;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_device(ndev, &port_id);

	dev_put(ndev);

	if (!cdev) {
		mutex_unlock(&cdev_list_lock);
		goto out;
	}

	cxgbit_update_dcb_priority(cdev, port_id, priority,
				   iscsi_app->app.protocol);
	mutex_unlock(&cdev_list_lock);
out:
	kfree(dcb_work);
}

static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct cxgbit_dcb_work *dcb_work;
	struct dcb_app_type *dcb_app = data;

	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
	if (!dcb_work)
		return NOTIFY_DONE;

	dcb_work->dcb_app = *dcb_app;
	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
	schedule_work(&dcb_work->work);
	return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}
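
/* iscsit transport ops: plug the cxgbit offload paths into the iSCSI target core. */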
static struct iscsit_transport cxgbit_transport = {
	.name			= DRV_NAME,
	.transport_type		= ISCSI_CXGBIT,
	.rdma_shutdown		= false,
	.priv_size		= sizeof(struct cxgbit_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= cxgbit_setup_np,
	.iscsit_accept_np	= cxgbit_accept_np,
	.iscsit_free_np		= cxgbit_free_np,
	.iscsit_free_conn	= cxgbit_free_conn,
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	.iscsit_validate_params	= cxgbit_validate_params,
	.iscsit_release_cmd	= cxgbit_release_cmd,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
	.name		= DRV_NAME,
	.nrxq		= MAX_ULD_QSETS,
	.ntxq		= MAX_ULD_QSETS,
	.lro		= true,
	.add		= cxgbit_uld_add,
	.state_change	= cxgbit_uld_state_change,
	.lro_rx_handler	= cxgbit_uld_lro_rx_handler,
	.lro_flush	= cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
	.notifier_call = cxgbit_dcbevent_notify,
};
#endif

static int __init cxgbit_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
	iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_NAME);
	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(union cxgbit_skb_cb));
	return 0;
}

static void __exit cxgbit_exit(void)
{
	struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	mutex_lock(&cdev_list_lock);
	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
		list_del(&cdev->list);
		cxgbit_put_cdev(cdev);
	}
	mutex_unlock(&cdev_list_lock);
	iscsit_unregister_transport(&cxgbit_transport);
	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");