/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);

DEFINE_MUTEX(cdev_list_lock);

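/*
 * kref release callback: tear down the page-pod manager and free the
 * cxgbit_device once the last reference is dropped.
 */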
void _cxgbit_free_cdev(struct kref *kref)
{
	struct cxgbit_device *cdev;

	cdev = container_of(kref, struct cxgbit_device, kref);

	cxgbi_ppm_release(cdev2ppm(cdev));
	kfree(cdev);
}

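/*
 * Compute the maximum data segment length: bounded by the adapter's
 * iSCSI I/O length, the ULP2 packet limit, 8K, and the skb frag list.
 */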
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
	mdsl = min_t(u32, mdsl, 8192);
	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

	cdev->mdsl = mdsl;
}

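/*
 * cxgb4 ULD "add" callback: allocate and set up one cxgbit_device per
 * adapter (T5 and later only) and put it on the global cdev list.
 */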
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbit_device *cdev;

	if (is_t4(lldi->adapter_type))
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	kref_init(&cdev->kref);

	cdev->lldi = *lldi;

	cxgbit_set_mdsl(cdev);

	if (cxgbit_ddp_init(cdev) < 0) {
		kfree(cdev);
		return ERR_PTR(-EINVAL);
	}

	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
		pr_info("cdev %s ddp init failed\n",
			pci_name(lldi->pdev));

	if (lldi->fw_vers >= 0x10d2b00)
		set_bit(CDEV_ISO_ENABLE, &cdev->flags);

	spin_lock_init(&cdev->cskq.lock);
	INIT_LIST_HEAD(&cdev->cskq.list);

	mutex_lock(&cdev_list_lock);
	list_add_tail(&cdev->list, &cdev_list_head);
	mutex_unlock(&cdev_list_lock);

	pr_info("cdev %s added for iSCSI target transport\n",
		pci_name(lldi->pdev));

	return cdev;
}

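/*
 * Nudge every connection on this device: queue an empty skb on each
 * rx queue and wake the rx wait queue so the owner notices the event.
 */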
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock_bh(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock_bh(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

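/*
 * Detach the device: drop it from the global list if no connections
 * remain, otherwise ask the remaining connections to close first.
 */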
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
	bool free_cdev = false;

	spin_lock_bh(&cdev->cskq.lock);
	if (list_empty(&cdev->cskq.list))
		free_cdev = true;
	spin_unlock_bh(&cdev->cskq.lock);

	if (free_cdev) {
		mutex_lock(&cdev_list_lock);
		list_del(&cdev->list);
		mutex_unlock(&cdev_list_lock);

		cxgbit_put_cdev(cdev);
	} else {
		cxgbit_close_conn(cdev);
	}
}

static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbit_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		set_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_START_RECOVERY:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		cxgbit_close_conn(cdev);
		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DETACH:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
		cxgbit_detach_cdev(cdev);
		break;
	default:
		pr_info("cdev %s unknown state %d.\n",
			pci_name(cdev->lldi.pdev), state);
		break;
	}
	return 0;
}

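/* Translate the hardware DDP status bits into per-PDU control-block flags. */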
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
		      u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
	}

	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
	}

	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
		pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

	if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
	    (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
	}
}

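/* Fold a CPL_RX_ISCSI_DDP completion into the current LRO skb's PDU state. */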
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
						lro_cb->pdu_idx);
	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

	cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

	pdu_cb->flags |= PDUCBF_RX_STATUS;
	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
	pdu_cb->pdulen = ntohs(cpl->len);

	if (pdu_cb->flags & PDUCBF_RX_HDR)
		pdu_cb->complete = true;

	lro_cb->pdu_totallen += pdu_cb->pdulen;
	lro_cb->complete = true;
	lro_cb->pdu_idx++;
}

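/* Append the packet-gather-list pages to the LRO skb as page fragments. */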
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
		  unsigned int offset)
{
	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
	u8 i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, skb_frag_idx + i,
				     gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	skb_shinfo(skb)->nr_frags += gl->nfrags;

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

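/*
 * Merge one received CPL (iSCSI header, data or completion) into the LRO
 * skb, updating the per-PDU control block and the skb length accounting.
 */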
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
						lro_cb->pdu_idx);
	u32 len, offset;

	if (op == CPL_ISCSI_HDR) {
		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

		offset = sizeof(struct cpl_iscsi_hdr);
		pdu_cb->flags |= PDUCBF_RX_HDR;
		pdu_cb->seq = ntohl(cpl->seq);
		len = ntohs(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		lro_cb->complete = false;
	} else if (op == CPL_ISCSI_DATA) {
		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

		offset = sizeof(struct cpl_iscsi_data);
		pdu_cb->flags |= PDUCBF_RX_DATA;
		len = ntohs(cpl->len);
		pdu_cb->dlen = len;
		pdu_cb->doffset = lro_cb->offset;
		pdu_cb->nr_dfrags = gl->nfrags;
		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
		lro_cb->complete = false;
	} else {
		struct cpl_rx_iscsi_cmp *cpl;

		cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
		offset = sizeof(struct cpl_rx_iscsi_cmp);
		pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
		len = be16_to_cpu(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
		pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
		pdu_cb->pdulen = ntohs(cpl->len);

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
				      be32_to_cpu(cpl->ddpvld));

		if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
			pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
			pdu_cb->complete = true;
		} else if (pdu_cb->flags & PDUCBF_RX_DATA) {
			pdu_cb->complete = true;
		}

		lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
		lro_cb->complete = true;
		lro_cb->pdu_idx++;
	}

	cxgbit_copy_frags(skb, gl, offset);

	pdu_cb->frags += gl->nfrags;
	lro_cb->offset += len;
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}

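/* Allocate and initialise a fresh LRO skb for this connection. */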
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
		    const __be64 *rsp, struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
	if (unlikely(!skb))
		return NULL;

	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

	lro_cb = cxgbit_skb_lro_cb(skb);

	cxgbit_get_csk(csk);

	lro_cb->csk = csk;

	return skb;
}

static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	bool wakeup_thread = false;

	spin_lock(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	if (skb_queue_len(&csk->rxq) == 1)
		wakeup_thread = true;
	spin_unlock(&csk->rxq.lock);

	if (wakeup_thread)
		wake_up(&csk->waitq);
}

static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_sock *csk = lro_cb->csk;

	csk->lro_skb = NULL;

	__skb_unlink(skb, &lro_mgr->lroq);
	cxgbit_queue_lro_skb(csk, skb);

	cxgbit_put_csk(csk);

	lro_mgr->lro_pkts++;
	lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&lro_mgr->lroq)))
		cxgbit_lro_flush(lro_mgr, skb);
}

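/*
 * Main LRO entry point: start a new LRO session or merge into the
 * existing one, flushing first whenever the skb would overflow.
 */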
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
		   struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;

start_lro:
	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
		cxgbit_uld_lro_flush(lro_mgr);
		goto start_lro;
	}

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;

	csk->lro_skb = skb;

	__skb_queue_tail(&lro_mgr->lroq, skb);
	lro_mgr->lro_session_cnt++;

add_packet:
	skb = csk->lro_skb;
	lro_cb = cxgbit_skb_lro_cb(skb);

	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
		cxgbit_lro_flush(lro_mgr, skb);
		goto start_lro;
	}

	if (gl)
		cxgbit_lro_add_packet_gl(skb, op, gl);
	else
		cxgbit_lro_add_packet_rsp(skb, op, rsp);

	lro_mgr->lro_merged++;

	return 0;

out:
	return -1;
}

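/*
 * cxgb4 ULD rx handler: map the CPL opcode to a connection, feed the iSCSI
 * CPLs through the LRO path and hand everything else to cxgbit_cplhandlers[].
 */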
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	struct cxgbit_device *cdev = hndl;
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct cpl_tx_data *rpl = NULL;
	struct cxgbit_sock *csk = NULL;
	unsigned int tid = 0;
	struct sk_buff *skb;
	unsigned int op = *(u8 *)rsp;
	bool lro_flush = true;

	switch (op) {
	case CPL_ISCSI_HDR:
	case CPL_ISCSI_DATA:
	case CPL_RX_ISCSI_CMP:
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
	case CPL_CLOSE_CON_RPL:
	case CPL_ABORT_REQ_RSS:
	case CPL_SET_TCB_RPL:
	case CPL_RX_DATA:
		rpl = gl ? (struct cpl_tx_data *)gl->va :
			   (struct cpl_tx_data *)(rsp + 1);
		tid = GET_TID(rpl);
		csk = lookup_tid(lldi->tids, tid);
		break;
	default:
		break;
	}

	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!gl) {
		unsigned int len;

		if (op == CPL_RX_ISCSI_DDP) {
			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
						napi))
				return 0;
		}

		len = 64 - sizeof(struct rsp_ctrl) - 8;
		skb = napi_alloc_skb(napi, len);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				gl->va, be64_to_cpu(*rsp),
				get_unaligned_be64(gl->va),
				gl->tot_len);
			return 0;
		}

		if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
		    (op == CPL_RX_ISCSI_CMP)) {
			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
						napi))
				return 0;
		}

#define RX_PULL_LEN 128
		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_tx_data *)skb->data;
	op = rpl->ot.opcode;
	cxgbit_skcb_rx_opcode(skb) = op;

	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, op, rpl->ot.opcode_tid,
		 ntohl(rpl->ot.opcode_tid), skb);

	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
		cxgbit_cplhandlers[op](cdev, skb);
	} else {
		pr_err("No handler for opcode 0x%x.\n", op);
		__kfree_skb(skb);
	}

	return 0;
nomem:
	pr_err("%s OOM bailing out.\n", __func__);
	return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
	struct dcb_app_type dcb_app;
	struct work_struct work;
};

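/*
 * Wake every connection on the matching port whose DCB priority no longer
 * matches; the woken rx path presumably handles the actual update.
 */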
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
			   u8 dcb_priority, u16 port_num)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	u16 local_port;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;

		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6;

			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);
		} else {
			struct sockaddr_in *sock_in;

			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);
		}

		if (local_port != port_num)
			continue;

		if (csk->dcb_priority == dcb_priority)
			continue;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

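/* Work item: resolve a DCB app notification to a device/port and apply it. */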
static void cxgbit_dcb_workfn(struct work_struct *work)
{
	struct cxgbit_dcb_work *dcb_work;
	struct net_device *ndev;
	struct cxgbit_device *cdev = NULL;
	struct dcb_app_type *iscsi_app;
	u8 priority, port_id = 0xff;

	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
	iscsi_app = &dcb_work->dcb_app;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			goto out;

		priority = iscsi_app->app.priority;

	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			goto out;

		if (!iscsi_app->app.priority)
			goto out;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		goto out;
	}

	pr_debug("priority for ifid %d is %u\n",
		 iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		goto out;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_device(ndev, &port_id);

	dev_put(ndev);

	if (!cdev) {
		mutex_unlock(&cdev_list_lock);
		goto out;
	}

	cxgbit_update_dcb_priority(cdev, port_id, priority,
				   iscsi_app->app.protocol);
	mutex_unlock(&cdev_list_lock);
out:
	kfree(dcb_work);
}

static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct cxgbit_dcb_work *dcb_work;
	struct dcb_app_type *dcb_app = data;

	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
	if (!dcb_work)
		return NOTIFY_DONE;

	dcb_work->dcb_app = *dcb_app;
	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
	schedule_work(&dcb_work->work);
	return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

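/* iscsit_transport ops wiring cxgbit into the iSCSI target core. */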
static struct iscsit_transport cxgbit_transport = {
	.name			= DRV_NAME,
	.transport_type		= ISCSI_CXGBIT,
	.rdma_shutdown		= false,
	.priv_size		= sizeof(struct cxgbit_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= cxgbit_setup_np,
	.iscsit_accept_np	= cxgbit_accept_np,
	.iscsit_free_np		= cxgbit_free_np,
	.iscsit_free_conn	= cxgbit_free_conn,
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	.iscsit_validate_params	= cxgbit_validate_params,
	.iscsit_release_cmd	= cxgbit_release_cmd,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
	.name		= DRV_NAME,
	.nrxq		= MAX_ULD_QSETS,
	.ntxq		= MAX_ULD_QSETS,
	.add		= cxgbit_uld_add,
	.state_change	= cxgbit_uld_state_change,
	.lro_rx_handler	= cxgbit_uld_lro_rx_handler,
	.lro_flush	= cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
	.notifier_call = cxgbit_dcbevent_notify,
};
#endif

static int __init cxgbit_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
	iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_NAME);
	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(union cxgbit_skb_cb));
	return 0;
}

static void __exit cxgbit_exit(void)
{
	struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	mutex_lock(&cdev_list_lock);
	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
		list_del(&cdev->list);
		cxgbit_put_cdev(cdev);
	}
	mutex_unlock(&cdev_list_lock);
	iscsit_unregister_transport(&cxgbit_transport);
	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");