/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <scsi/iscsi_if.h>

#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	cp->uio_dev = iminor(inode);

	cnic_shutdown_bnx2_rx_ring(dev);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
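/*
 * Note: the register and completion helpers above never touch hardware
 * directly.  Each one fills a struct drv_ctl_info with a DRV_CTL_* command
 * (context write, indirect register write/read, kwqe completion count) and
 * passes it to the ethernet driver through ethdev->drv_ctl(), which performs
 * the actual register access on behalf of the CNIC core.
 */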
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}
237 static int cnic_iscsi_nl_msg_recv(struct cnic_dev
*dev
, u32 msg_type
,
243 case ISCSI_UEVENT_PATH_UPDATE
: {
244 struct cnic_local
*cp
;
246 struct cnic_sock
*csk
;
247 struct iscsi_path
*path_resp
;
249 if (len
< sizeof(*path_resp
))
252 path_resp
= (struct iscsi_path
*) buf
;
254 l5_cid
= (u32
) path_resp
->handle
;
255 if (l5_cid
>= MAX_CM_SK_TBL_SZ
)
258 csk
= &cp
->csk_tbl
[l5_cid
];
260 if (cnic_in_use(csk
)) {
261 memcpy(csk
->ha
, path_resp
->mac_addr
, 6);
262 if (test_bit(SK_F_IPV6
, &csk
->flags
))
263 memcpy(&csk
->src_ip
[0], &path_resp
->src
.v6_addr
,
264 sizeof(struct in6_addr
));
266 memcpy(&csk
->src_ip
[0], &path_resp
->src
.v4_addr
,
267 sizeof(struct in_addr
));
268 if (is_valid_ether_addr(csk
->ha
))
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
				    "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}
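/*
 * Note on the two device loops in cnic_register_driver(): the first pass
 * clears ULP_F_INIT for every existing cnic device before the new ulp_ops
 * is published in cnic_ulp_tbl[]; the second pass then calls
 * ulp_ops->cnic_init() exactly once per device, with the test_and_set_bit()
 * guard preventing a duplicate init if cnic_netdev_event() runs in between.
 * This is how a ULP driver loaded after the NIC was probed still learns
 * about the device.
 */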
365 int cnic_unregister_driver(int ulp_type
)
367 struct cnic_dev
*dev
;
369 if (ulp_type
>= MAX_CNIC_ULP_TYPE
) {
370 printk(KERN_ERR PFX
"cnic_unregister_driver: Bad type %d\n",
374 mutex_lock(&cnic_lock
);
375 if (!cnic_ulp_tbl
[ulp_type
]) {
376 printk(KERN_ERR PFX
"cnic_unregister_driver: Type %d has not "
377 "been registered\n", ulp_type
);
380 read_lock(&cnic_dev_lock
);
381 list_for_each_entry(dev
, &cnic_dev_list
, list
) {
382 struct cnic_local
*cp
= dev
->cnic_priv
;
384 if (rcu_dereference(cp
->ulp_ops
[ulp_type
])) {
385 printk(KERN_ERR PFX
"cnic_unregister_driver: Type %d "
386 "still has devices registered\n", ulp_type
);
387 read_unlock(&cnic_dev_lock
);
391 read_unlock(&cnic_dev_lock
);
393 rcu_assign_pointer(cnic_ulp_tbl
[ulp_type
], NULL
);
395 mutex_unlock(&cnic_lock
);
400 mutex_unlock(&cnic_lock
);
404 static int cnic_start_hw(struct cnic_dev
*);
405 static void cnic_stop_hw(struct cnic_dev
*);
407 static int cnic_register_device(struct cnic_dev
*dev
, int ulp_type
,
410 struct cnic_local
*cp
= dev
->cnic_priv
;
411 struct cnic_ulp_ops
*ulp_ops
;
413 if (ulp_type
>= MAX_CNIC_ULP_TYPE
) {
414 printk(KERN_ERR PFX
"cnic_register_device: Bad type %d\n",
418 mutex_lock(&cnic_lock
);
419 if (cnic_ulp_tbl
[ulp_type
] == NULL
) {
420 printk(KERN_ERR PFX
"cnic_register_device: Driver with type %d "
421 "has not been registered\n", ulp_type
);
422 mutex_unlock(&cnic_lock
);
425 if (rcu_dereference(cp
->ulp_ops
[ulp_type
])) {
426 printk(KERN_ERR PFX
"cnic_register_device: Type %d has already "
427 "been registered to this device\n", ulp_type
);
428 mutex_unlock(&cnic_lock
);
432 clear_bit(ULP_F_START
, &cp
->ulp_flags
[ulp_type
]);
433 cp
->ulp_handle
[ulp_type
] = ulp_ctx
;
434 ulp_ops
= cnic_ulp_tbl
[ulp_type
];
435 rcu_assign_pointer(cp
->ulp_ops
[ulp_type
], ulp_ops
);
438 if (test_bit(CNIC_F_CNIC_UP
, &dev
->flags
))
439 if (!test_and_set_bit(ULP_F_START
, &cp
->ulp_flags
[ulp_type
]))
440 ulp_ops
->cnic_start(cp
->ulp_handle
[ulp_type
]);
442 mutex_unlock(&cnic_lock
);
447 EXPORT_SYMBOL(cnic_register_driver
);
449 static int cnic_unregister_device(struct cnic_dev
*dev
, int ulp_type
)
451 struct cnic_local
*cp
= dev
->cnic_priv
;
453 if (ulp_type
>= MAX_CNIC_ULP_TYPE
) {
454 printk(KERN_ERR PFX
"cnic_unregister_device: Bad type %d\n",
458 mutex_lock(&cnic_lock
);
459 if (rcu_dereference(cp
->ulp_ops
[ulp_type
])) {
460 rcu_assign_pointer(cp
->ulp_ops
[ulp_type
], NULL
);
463 printk(KERN_ERR PFX
"cnic_unregister_device: device not "
464 "registered to this ulp type %d\n", ulp_type
);
465 mutex_unlock(&cnic_lock
);
468 mutex_unlock(&cnic_lock
);
474 EXPORT_SYMBOL(cnic_unregister_driver
);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
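/*
 * The id table is a small spinlock-protected bitmap allocator, used later
 * for the L4 source-port table (csk_port_tbl) and the cid table (cid_tbl):
 * cnic_alloc_id() claims a specific id, cnic_alloc_new_id() does a next-fit
 * search that resumes at id_tbl->next and wraps once to the start of the
 * bitmap, and cnic_free_id() releases an id.  Illustrative example: with
 * start_id = CNIC_LOCAL_PORT_MIN and a 256-entry table, successive
 * cnic_alloc_new_id() calls hand out CNIC_LOCAL_PORT_MIN,
 * CNIC_LOCAL_PORT_MIN + 1, ... while "& (id_tbl->max - 1)" keeps the search
 * index inside the bitmap.
 */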
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
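/*
 * Each page-table entry written above is a 64-bit DMA address stored as two
 * consecutive 32-bit words, upper half first, matching the big-endian layout
 * the chip expects for its kernel-queue page tables (the same high/low split
 * is programmed into L5_KRNLQ_PGTBL_HADDR_HI/LO in cnic_start_bnx2_hw()).
 */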
589 static int cnic_alloc_dma(struct cnic_dev
*dev
, struct cnic_dma
*dma
,
590 int pages
, int use_pg_tbl
)
593 struct cnic_local
*cp
= dev
->cnic_priv
;
595 size
= pages
* (sizeof(void *) + sizeof(dma_addr_t
));
596 dma
->pg_arr
= kzalloc(size
, GFP_ATOMIC
);
597 if (dma
->pg_arr
== NULL
)
600 dma
->pg_map_arr
= (dma_addr_t
*) (dma
->pg_arr
+ pages
);
601 dma
->num_pages
= pages
;
603 for (i
= 0; i
< pages
; i
++) {
604 dma
->pg_arr
[i
] = pci_alloc_consistent(dev
->pcidev
,
606 &dma
->pg_map_arr
[i
]);
607 if (dma
->pg_arr
[i
] == NULL
)
613 dma
->pgtbl_size
= ((pages
* 8) + BCM_PAGE_SIZE
- 1) &
614 ~(BCM_PAGE_SIZE
- 1);
615 dma
->pgtbl
= pci_alloc_consistent(dev
->pcidev
, dma
->pgtbl_size
,
617 if (dma
->pgtbl
== NULL
)
620 cp
->setup_pgtbl(dev
, dma
);
625 cnic_free_dma(dev
, dma
);
629 static void cnic_free_resc(struct cnic_dev
*dev
)
631 struct cnic_local
*cp
= dev
->cnic_priv
;
634 if (cp
->cnic_uinfo
) {
635 cnic_send_nlmsg(cp
, ISCSI_KEVENT_IF_DOWN
, NULL
);
636 while (cp
->uio_dev
!= -1 && i
< 15) {
640 uio_unregister_device(cp
->cnic_uinfo
);
641 kfree(cp
->cnic_uinfo
);
642 cp
->cnic_uinfo
= NULL
;
646 pci_free_consistent(dev
->pcidev
, cp
->l2_buf_size
,
647 cp
->l2_buf
, cp
->l2_buf_map
);
652 pci_free_consistent(dev
->pcidev
, cp
->l2_ring_size
,
653 cp
->l2_ring
, cp
->l2_ring_map
);
657 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
658 if (cp
->ctx_arr
[i
].ctx
) {
659 pci_free_consistent(dev
->pcidev
, cp
->ctx_blk_size
,
661 cp
->ctx_arr
[i
].mapping
);
662 cp
->ctx_arr
[i
].ctx
= NULL
;
669 cnic_free_dma(dev
, &cp
->gbl_buf_info
);
670 cnic_free_dma(dev
, &cp
->conn_buf_info
);
671 cnic_free_dma(dev
, &cp
->kwq_info
);
672 cnic_free_dma(dev
, &cp
->kcq_info
);
673 kfree(cp
->iscsi_tbl
);
674 cp
->iscsi_tbl
= NULL
;
678 cnic_free_id_tbl(&cp
->cid_tbl
);
681 static int cnic_alloc_context(struct cnic_dev
*dev
)
683 struct cnic_local
*cp
= dev
->cnic_priv
;
685 if (CHIP_NUM(cp
) == CHIP_NUM_5709
) {
688 cp
->ctx_blk_size
= BCM_PAGE_SIZE
;
689 cp
->cids_per_blk
= BCM_PAGE_SIZE
/ 128;
690 arr_size
= BNX2_MAX_CID
/ cp
->cids_per_blk
*
691 sizeof(struct cnic_ctx
);
692 cp
->ctx_arr
= kzalloc(arr_size
, GFP_KERNEL
);
693 if (cp
->ctx_arr
== NULL
)
697 for (i
= 0; i
< 2; i
++) {
698 u32 j
, reg
, off
, lo
, hi
;
701 off
= BNX2_PG_CTX_MAP
;
703 off
= BNX2_ISCSI_CTX_MAP
;
705 reg
= cnic_reg_rd_ind(dev
, off
);
708 for (j
= lo
; j
< hi
; j
+= cp
->cids_per_blk
, k
++)
709 cp
->ctx_arr
[k
].cid
= j
;
713 if (cp
->ctx_blks
>= (BNX2_MAX_CID
/ cp
->cids_per_blk
)) {
718 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
720 pci_alloc_consistent(dev
->pcidev
, BCM_PAGE_SIZE
,
721 &cp
->ctx_arr
[i
].mapping
);
722 if (cp
->ctx_arr
[i
].ctx
== NULL
)
729 static int cnic_alloc_bnx2_resc(struct cnic_dev
*dev
)
731 struct cnic_local
*cp
= dev
->cnic_priv
;
732 struct uio_info
*uinfo
;
735 ret
= cnic_alloc_dma(dev
, &cp
->kwq_info
, KWQ_PAGE_CNT
, 1);
738 cp
->kwq
= (struct kwqe
**) cp
->kwq_info
.pg_arr
;
740 ret
= cnic_alloc_dma(dev
, &cp
->kcq_info
, KCQ_PAGE_CNT
, 1);
743 cp
->kcq
= (struct kcqe
**) cp
->kcq_info
.pg_arr
;
745 ret
= cnic_alloc_context(dev
);
749 cp
->l2_ring_size
= 2 * BCM_PAGE_SIZE
;
750 cp
->l2_ring
= pci_alloc_consistent(dev
->pcidev
, cp
->l2_ring_size
,
755 cp
->l2_buf_size
= (cp
->l2_rx_ring_size
+ 1) * cp
->l2_single_buf_size
;
756 cp
->l2_buf_size
= PAGE_ALIGN(cp
->l2_buf_size
);
757 cp
->l2_buf
= pci_alloc_consistent(dev
->pcidev
, cp
->l2_buf_size
,
762 uinfo
= kzalloc(sizeof(*uinfo
), GFP_ATOMIC
);
766 uinfo
->mem
[0].addr
= dev
->netdev
->base_addr
;
767 uinfo
->mem
[0].internal_addr
= dev
->regview
;
768 uinfo
->mem
[0].size
= dev
->netdev
->mem_end
- dev
->netdev
->mem_start
;
769 uinfo
->mem
[0].memtype
= UIO_MEM_PHYS
;
771 uinfo
->mem
[1].addr
= (unsigned long) cp
->status_blk
& PAGE_MASK
;
772 if (cp
->ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
)
773 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
* 9;
775 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
;
776 uinfo
->mem
[1].memtype
= UIO_MEM_LOGICAL
;
778 uinfo
->mem
[2].addr
= (unsigned long) cp
->l2_ring
;
779 uinfo
->mem
[2].size
= cp
->l2_ring_size
;
780 uinfo
->mem
[2].memtype
= UIO_MEM_LOGICAL
;
782 uinfo
->mem
[3].addr
= (unsigned long) cp
->l2_buf
;
783 uinfo
->mem
[3].size
= cp
->l2_buf_size
;
784 uinfo
->mem
[3].memtype
= UIO_MEM_LOGICAL
;
786 uinfo
->name
= "bnx2_cnic";
787 uinfo
->version
= CNIC_MODULE_VERSION
;
788 uinfo
->irq
= UIO_IRQ_CUSTOM
;
790 uinfo
->open
= cnic_uio_open
;
791 uinfo
->release
= cnic_uio_close
;
795 ret
= uio_register_device(&dev
->pcidev
->dev
, uinfo
);
801 cp
->cnic_uinfo
= uinfo
;
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
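/*
 * Free-slot math for the kernel work queue: max_kwq_idx (set to MAX_KWQ_IDX
 * in cnic_start_bnx2_hw()) is assumed to be the power-of-two ring size minus
 * one, so (prod - con) & max_kwq_idx is the number of in-flight entries even
 * after the 16-bit indexes wrap.  Illustrative example with a 256-entry ring
 * (max_kwq_idx = 255): prod = 5, con = 250 gives (5 - 250) & 255 = 11 used
 * slots, i.e. 244 still available.
 */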
816 static int cnic_submit_bnx2_kwqes(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
819 struct cnic_local
*cp
= dev
->cnic_priv
;
820 struct kwqe
*prod_qe
;
821 u16 prod
, sw_prod
, i
;
823 if (!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
))
824 return -EAGAIN
; /* bnx2 is down */
826 spin_lock_bh(&cp
->cnic_ulp_lock
);
827 if (num_wqes
> cnic_kwq_avail(cp
) &&
828 !(cp
->cnic_local_flags
& CNIC_LCL_FL_KWQ_INIT
)) {
829 spin_unlock_bh(&cp
->cnic_ulp_lock
);
833 cp
->cnic_local_flags
&= ~CNIC_LCL_FL_KWQ_INIT
;
835 prod
= cp
->kwq_prod_idx
;
836 sw_prod
= prod
& MAX_KWQ_IDX
;
837 for (i
= 0; i
< num_wqes
; i
++) {
838 prod_qe
= &cp
->kwq
[KWQ_PG(sw_prod
)][KWQ_IDX(sw_prod
)];
839 memcpy(prod_qe
, wqes
[i
], sizeof(struct kwqe
));
841 sw_prod
= prod
& MAX_KWQ_IDX
;
843 cp
->kwq_prod_idx
= prod
;
845 CNIC_WR16(dev
, cp
->kwq_io_addr
, cp
->kwq_prod_idx
);
847 spin_unlock_bh(&cp
->cnic_ulp_lock
);
851 static void service_kcqes(struct cnic_dev
*dev
, int num_cqes
)
853 struct cnic_local
*cp
= dev
->cnic_priv
;
859 struct cnic_ulp_ops
*ulp_ops
;
861 u32 kcqe_op_flag
= cp
->completed_kcq
[i
]->kcqe_op_flag
;
862 u32 kcqe_layer
= kcqe_op_flag
& KCQE_FLAGS_LAYER_MASK
;
864 if (unlikely(kcqe_op_flag
& KCQE_RAMROD_COMPLETION
))
865 cnic_kwq_completion(dev
, 1);
867 while (j
< num_cqes
) {
868 u32 next_op
= cp
->completed_kcq
[i
+ j
]->kcqe_op_flag
;
870 if ((next_op
& KCQE_FLAGS_LAYER_MASK
) != kcqe_layer
)
873 if (unlikely(next_op
& KCQE_RAMROD_COMPLETION
))
874 cnic_kwq_completion(dev
, 1);
878 if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_RDMA
)
879 ulp_type
= CNIC_ULP_RDMA
;
880 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_ISCSI
)
881 ulp_type
= CNIC_ULP_ISCSI
;
882 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L4
)
883 ulp_type
= CNIC_ULP_L4
;
884 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L2
)
887 printk(KERN_ERR PFX
"%s: Unknown type of KCQE(0x%x)\n",
888 dev
->netdev
->name
, kcqe_op_flag
);
893 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
894 if (likely(ulp_ops
)) {
895 ulp_ops
->indicate_kcqes(cp
->ulp_handle
[ulp_type
],
896 cp
->completed_kcq
+ i
, j
);
907 static u16
cnic_bnx2_next_idx(u16 idx
)
912 static u16
cnic_bnx2_hw_idx(u16 idx
)
917 static int cnic_get_kcqes(struct cnic_dev
*dev
, u16 hw_prod
, u16
*sw_prod
)
919 struct cnic_local
*cp
= dev
->cnic_priv
;
922 int kcqe_cnt
= 0, last_cnt
= 0;
924 i
= ri
= last
= *sw_prod
;
927 while ((i
!= hw_prod
) && (kcqe_cnt
< MAX_COMPLETED_KCQE
)) {
928 kcqe
= &cp
->kcq
[KCQ_PG(ri
)][KCQ_IDX(ri
)];
929 cp
->completed_kcq
[kcqe_cnt
++] = kcqe
;
931 ri
= i
& MAX_KCQ_IDX
;
932 if (likely(!(kcqe
->kcqe_op_flag
& KCQE_FLAGS_NEXT
))) {
942 static void cnic_chk_bnx2_pkt_rings(struct cnic_local
*cp
)
944 u16 rx_cons
= *cp
->rx_cons_ptr
;
945 u16 tx_cons
= *cp
->tx_cons_ptr
;
947 if (cp
->tx_cons
!= tx_cons
|| cp
->rx_cons
!= rx_cons
) {
948 cp
->tx_cons
= tx_cons
;
949 cp
->rx_cons
= rx_cons
;
950 uio_event_notify(cp
->cnic_uinfo
);
954 static int cnic_service_bnx2(void *data
, void *status_blk
)
956 struct cnic_dev
*dev
= data
;
957 struct status_block
*sblk
= status_blk
;
958 struct cnic_local
*cp
= dev
->cnic_priv
;
959 u32 status_idx
= sblk
->status_idx
;
960 u16 hw_prod
, sw_prod
;
963 if (unlikely(!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
966 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
968 hw_prod
= sblk
->status_completion_producer_index
;
969 sw_prod
= cp
->kcq_prod_idx
;
970 while (sw_prod
!= hw_prod
) {
971 kcqe_cnt
= cnic_get_kcqes(dev
, hw_prod
, &sw_prod
);
975 service_kcqes(dev
, kcqe_cnt
);
977 /* Tell compiler that status_blk fields can change. */
979 if (status_idx
!= sblk
->status_idx
) {
980 status_idx
= sblk
->status_idx
;
981 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
982 hw_prod
= sblk
->status_completion_producer_index
;
988 CNIC_WR16(dev
, cp
->kcq_io_addr
, sw_prod
);
990 cp
->kcq_prod_idx
= sw_prod
;
992 cnic_chk_bnx2_pkt_rings(cp
);
996 static void cnic_service_bnx2_msix(unsigned long data
)
998 struct cnic_dev
*dev
= (struct cnic_dev
*) data
;
999 struct cnic_local
*cp
= dev
->cnic_priv
;
1000 struct status_block_msix
*status_blk
= cp
->bnx2_status_blk
;
1001 u32 status_idx
= status_blk
->status_idx
;
1002 u16 hw_prod
, sw_prod
;
1005 cp
->kwq_con_idx
= status_blk
->status_cmd_consumer_index
;
1007 hw_prod
= status_blk
->status_completion_producer_index
;
1008 sw_prod
= cp
->kcq_prod_idx
;
1009 while (sw_prod
!= hw_prod
) {
1010 kcqe_cnt
= cnic_get_kcqes(dev
, hw_prod
, &sw_prod
);
1014 service_kcqes(dev
, kcqe_cnt
);
1016 /* Tell compiler that status_blk fields can change. */
1018 if (status_idx
!= status_blk
->status_idx
) {
1019 status_idx
= status_blk
->status_idx
;
1020 cp
->kwq_con_idx
= status_blk
->status_cmd_consumer_index
;
1021 hw_prod
= status_blk
->status_completion_producer_index
;
1027 CNIC_WR16(dev
, cp
->kcq_io_addr
, sw_prod
);
1028 cp
->kcq_prod_idx
= sw_prod
;
1030 cnic_chk_bnx2_pkt_rings(cp
);
1032 cp
->last_status_idx
= status_idx
;
1033 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
1034 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| cp
->last_status_idx
);
1037 static irqreturn_t
cnic_irq(int irq
, void *dev_instance
)
1039 struct cnic_dev
*dev
= dev_instance
;
1040 struct cnic_local
*cp
= dev
->cnic_priv
;
1041 u16 prod
= cp
->kcq_prod_idx
& MAX_KCQ_IDX
;
1046 prefetch(cp
->status_blk
);
1047 prefetch(&cp
->kcq
[KCQ_PG(prod
)][KCQ_IDX(prod
)]);
1049 if (likely(test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
1050 tasklet_schedule(&cp
->cnic_irq_task
);
1055 static void cnic_ulp_stop(struct cnic_dev
*dev
)
1057 struct cnic_local
*cp
= dev
->cnic_priv
;
1061 for (if_type
= 0; if_type
< MAX_CNIC_ULP_TYPE
; if_type
++) {
1062 struct cnic_ulp_ops
*ulp_ops
;
1064 ulp_ops
= rcu_dereference(cp
->ulp_ops
[if_type
]);
1068 if (test_and_clear_bit(ULP_F_START
, &cp
->ulp_flags
[if_type
]))
1069 ulp_ops
->cnic_stop(cp
->ulp_handle
[if_type
]);
1074 static void cnic_ulp_start(struct cnic_dev
*dev
)
1076 struct cnic_local
*cp
= dev
->cnic_priv
;
1080 for (if_type
= 0; if_type
< MAX_CNIC_ULP_TYPE
; if_type
++) {
1081 struct cnic_ulp_ops
*ulp_ops
;
1083 ulp_ops
= rcu_dereference(cp
->ulp_ops
[if_type
]);
1084 if (!ulp_ops
|| !ulp_ops
->cnic_start
)
1087 if (!test_and_set_bit(ULP_F_START
, &cp
->ulp_flags
[if_type
]))
1088 ulp_ops
->cnic_start(cp
->ulp_handle
[if_type
]);
1093 static int cnic_ctl(void *data
, struct cnic_ctl_info
*info
)
1095 struct cnic_dev
*dev
= data
;
1097 switch (info
->cmd
) {
1098 case CNIC_CTL_STOP_CMD
:
1100 mutex_lock(&cnic_lock
);
1105 mutex_unlock(&cnic_lock
);
1108 case CNIC_CTL_START_CMD
:
1110 mutex_lock(&cnic_lock
);
1112 if (!cnic_start_hw(dev
))
1113 cnic_ulp_start(dev
);
1115 mutex_unlock(&cnic_lock
);
1124 static void cnic_ulp_init(struct cnic_dev
*dev
)
1127 struct cnic_local
*cp
= dev
->cnic_priv
;
1130 for (i
= 0; i
< MAX_CNIC_ULP_TYPE_EXT
; i
++) {
1131 struct cnic_ulp_ops
*ulp_ops
;
1133 ulp_ops
= rcu_dereference(cnic_ulp_tbl
[i
]);
1134 if (!ulp_ops
|| !ulp_ops
->cnic_init
)
1137 if (!test_and_set_bit(ULP_F_INIT
, &cp
->ulp_flags
[i
]))
1138 ulp_ops
->cnic_init(dev
);
1144 static void cnic_ulp_exit(struct cnic_dev
*dev
)
1147 struct cnic_local
*cp
= dev
->cnic_priv
;
1150 for (i
= 0; i
< MAX_CNIC_ULP_TYPE_EXT
; i
++) {
1151 struct cnic_ulp_ops
*ulp_ops
;
1153 ulp_ops
= rcu_dereference(cnic_ulp_tbl
[i
]);
1154 if (!ulp_ops
|| !ulp_ops
->cnic_exit
)
1157 if (test_and_clear_bit(ULP_F_INIT
, &cp
->ulp_flags
[i
]))
1158 ulp_ops
->cnic_exit(dev
);
1164 static int cnic_cm_offload_pg(struct cnic_sock
*csk
)
1166 struct cnic_dev
*dev
= csk
->dev
;
1167 struct l4_kwq_offload_pg
*l4kwqe
;
1168 struct kwqe
*wqes
[1];
1170 l4kwqe
= (struct l4_kwq_offload_pg
*) &csk
->kwqe1
;
1171 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1172 wqes
[0] = (struct kwqe
*) l4kwqe
;
1174 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_OFFLOAD_PG
;
1176 L4_LAYER_CODE
<< L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT
;
1177 l4kwqe
->l2hdr_nbytes
= ETH_HLEN
;
1179 l4kwqe
->da0
= csk
->ha
[0];
1180 l4kwqe
->da1
= csk
->ha
[1];
1181 l4kwqe
->da2
= csk
->ha
[2];
1182 l4kwqe
->da3
= csk
->ha
[3];
1183 l4kwqe
->da4
= csk
->ha
[4];
1184 l4kwqe
->da5
= csk
->ha
[5];
1186 l4kwqe
->sa0
= dev
->mac_addr
[0];
1187 l4kwqe
->sa1
= dev
->mac_addr
[1];
1188 l4kwqe
->sa2
= dev
->mac_addr
[2];
1189 l4kwqe
->sa3
= dev
->mac_addr
[3];
1190 l4kwqe
->sa4
= dev
->mac_addr
[4];
1191 l4kwqe
->sa5
= dev
->mac_addr
[5];
1193 l4kwqe
->etype
= ETH_P_IP
;
1194 l4kwqe
->ipid_count
= DEF_IPID_COUNT
;
1195 l4kwqe
->host_opaque
= csk
->l5_cid
;
1198 l4kwqe
->pg_flags
|= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING
;
1199 l4kwqe
->vlan_tag
= csk
->vlan_id
;
1200 l4kwqe
->l2hdr_nbytes
+= 4;
1203 return dev
->submit_kwqes(dev
, wqes
, 1);
1206 static int cnic_cm_update_pg(struct cnic_sock
*csk
)
1208 struct cnic_dev
*dev
= csk
->dev
;
1209 struct l4_kwq_update_pg
*l4kwqe
;
1210 struct kwqe
*wqes
[1];
1212 l4kwqe
= (struct l4_kwq_update_pg
*) &csk
->kwqe1
;
1213 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1214 wqes
[0] = (struct kwqe
*) l4kwqe
;
1216 l4kwqe
->opcode
= L4_KWQE_OPCODE_VALUE_UPDATE_PG
;
1218 L4_LAYER_CODE
<< L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT
;
1219 l4kwqe
->pg_cid
= csk
->pg_cid
;
1221 l4kwqe
->da0
= csk
->ha
[0];
1222 l4kwqe
->da1
= csk
->ha
[1];
1223 l4kwqe
->da2
= csk
->ha
[2];
1224 l4kwqe
->da3
= csk
->ha
[3];
1225 l4kwqe
->da4
= csk
->ha
[4];
1226 l4kwqe
->da5
= csk
->ha
[5];
1228 l4kwqe
->pg_host_opaque
= csk
->l5_cid
;
1229 l4kwqe
->pg_valids
= L4_KWQ_UPDATE_PG_VALIDS_DA
;
1231 return dev
->submit_kwqes(dev
, wqes
, 1);
1234 static int cnic_cm_upload_pg(struct cnic_sock
*csk
)
1236 struct cnic_dev
*dev
= csk
->dev
;
1237 struct l4_kwq_upload
*l4kwqe
;
1238 struct kwqe
*wqes
[1];
1240 l4kwqe
= (struct l4_kwq_upload
*) &csk
->kwqe1
;
1241 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1242 wqes
[0] = (struct kwqe
*) l4kwqe
;
1244 l4kwqe
->opcode
= L4_KWQE_OPCODE_VALUE_UPLOAD_PG
;
1246 L4_LAYER_CODE
<< L4_KWQ_UPLOAD_LAYER_CODE_SHIFT
;
1247 l4kwqe
->cid
= csk
->pg_cid
;
1249 return dev
->submit_kwqes(dev
, wqes
, 1);
1252 static int cnic_cm_conn_req(struct cnic_sock
*csk
)
1254 struct cnic_dev
*dev
= csk
->dev
;
1255 struct l4_kwq_connect_req1
*l4kwqe1
;
1256 struct l4_kwq_connect_req2
*l4kwqe2
;
1257 struct l4_kwq_connect_req3
*l4kwqe3
;
1258 struct kwqe
*wqes
[3];
1262 l4kwqe1
= (struct l4_kwq_connect_req1
*) &csk
->kwqe1
;
1263 l4kwqe2
= (struct l4_kwq_connect_req2
*) &csk
->kwqe2
;
1264 l4kwqe3
= (struct l4_kwq_connect_req3
*) &csk
->kwqe3
;
1265 memset(l4kwqe1
, 0, sizeof(*l4kwqe1
));
1266 memset(l4kwqe2
, 0, sizeof(*l4kwqe2
));
1267 memset(l4kwqe3
, 0, sizeof(*l4kwqe3
));
1269 l4kwqe3
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT3
;
1271 L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT
;
1272 l4kwqe3
->ka_timeout
= csk
->ka_timeout
;
1273 l4kwqe3
->ka_interval
= csk
->ka_interval
;
1274 l4kwqe3
->ka_max_probe_count
= csk
->ka_max_probe_count
;
1275 l4kwqe3
->tos
= csk
->tos
;
1276 l4kwqe3
->ttl
= csk
->ttl
;
1277 l4kwqe3
->snd_seq_scale
= csk
->snd_seq_scale
;
1278 l4kwqe3
->pmtu
= csk
->mtu
;
1279 l4kwqe3
->rcv_buf
= csk
->rcv_buf
;
1280 l4kwqe3
->snd_buf
= csk
->snd_buf
;
1281 l4kwqe3
->seed
= csk
->seed
;
1283 wqes
[0] = (struct kwqe
*) l4kwqe1
;
1284 if (test_bit(SK_F_IPV6
, &csk
->flags
)) {
1285 wqes
[1] = (struct kwqe
*) l4kwqe2
;
1286 wqes
[2] = (struct kwqe
*) l4kwqe3
;
1289 l4kwqe1
->conn_flags
= L4_KWQ_CONNECT_REQ1_IP_V6
;
1290 l4kwqe2
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT2
;
1292 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT
|
1293 L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT
;
1294 l4kwqe2
->src_ip_v6_2
= be32_to_cpu(csk
->src_ip
[1]);
1295 l4kwqe2
->src_ip_v6_3
= be32_to_cpu(csk
->src_ip
[2]);
1296 l4kwqe2
->src_ip_v6_4
= be32_to_cpu(csk
->src_ip
[3]);
1297 l4kwqe2
->dst_ip_v6_2
= be32_to_cpu(csk
->dst_ip
[1]);
1298 l4kwqe2
->dst_ip_v6_3
= be32_to_cpu(csk
->dst_ip
[2]);
1299 l4kwqe2
->dst_ip_v6_4
= be32_to_cpu(csk
->dst_ip
[3]);
1300 l4kwqe3
->mss
= l4kwqe3
->pmtu
- sizeof(struct ipv6hdr
) -
1301 sizeof(struct tcphdr
);
1303 wqes
[1] = (struct kwqe
*) l4kwqe3
;
1304 l4kwqe3
->mss
= l4kwqe3
->pmtu
- sizeof(struct iphdr
) -
1305 sizeof(struct tcphdr
);
1308 l4kwqe1
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT1
;
1310 (L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT
) |
1311 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT
;
1312 l4kwqe1
->cid
= csk
->cid
;
1313 l4kwqe1
->pg_cid
= csk
->pg_cid
;
1314 l4kwqe1
->src_ip
= be32_to_cpu(csk
->src_ip
[0]);
1315 l4kwqe1
->dst_ip
= be32_to_cpu(csk
->dst_ip
[0]);
1316 l4kwqe1
->src_port
= be16_to_cpu(csk
->src_port
);
1317 l4kwqe1
->dst_port
= be16_to_cpu(csk
->dst_port
);
1318 if (csk
->tcp_flags
& SK_TCP_NO_DELAY_ACK
)
1319 tcp_flags
|= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK
;
1320 if (csk
->tcp_flags
& SK_TCP_KEEP_ALIVE
)
1321 tcp_flags
|= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE
;
1322 if (csk
->tcp_flags
& SK_TCP_NAGLE
)
1323 tcp_flags
|= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE
;
1324 if (csk
->tcp_flags
& SK_TCP_TIMESTAMP
)
1325 tcp_flags
|= L4_KWQ_CONNECT_REQ1_TIME_STAMP
;
1326 if (csk
->tcp_flags
& SK_TCP_SACK
)
1327 tcp_flags
|= L4_KWQ_CONNECT_REQ1_SACK
;
1328 if (csk
->tcp_flags
& SK_TCP_SEG_SCALING
)
1329 tcp_flags
|= L4_KWQ_CONNECT_REQ1_SEG_SCALING
;
1331 l4kwqe1
->tcp_flags
= tcp_flags
;
1333 return dev
->submit_kwqes(dev
, wqes
, num_wqes
);
1336 static int cnic_cm_close_req(struct cnic_sock
*csk
)
1338 struct cnic_dev
*dev
= csk
->dev
;
1339 struct l4_kwq_close_req
*l4kwqe
;
1340 struct kwqe
*wqes
[1];
1342 l4kwqe
= (struct l4_kwq_close_req
*) &csk
->kwqe2
;
1343 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1344 wqes
[0] = (struct kwqe
*) l4kwqe
;
1346 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_CLOSE
;
1347 l4kwqe
->flags
= L4_LAYER_CODE
<< L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT
;
1348 l4kwqe
->cid
= csk
->cid
;
1350 return dev
->submit_kwqes(dev
, wqes
, 1);
1353 static int cnic_cm_abort_req(struct cnic_sock
*csk
)
1355 struct cnic_dev
*dev
= csk
->dev
;
1356 struct l4_kwq_reset_req
*l4kwqe
;
1357 struct kwqe
*wqes
[1];
1359 l4kwqe
= (struct l4_kwq_reset_req
*) &csk
->kwqe2
;
1360 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1361 wqes
[0] = (struct kwqe
*) l4kwqe
;
1363 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_RESET
;
1364 l4kwqe
->flags
= L4_LAYER_CODE
<< L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT
;
1365 l4kwqe
->cid
= csk
->cid
;
1367 return dev
->submit_kwqes(dev
, wqes
, 1);
1370 static int cnic_cm_create(struct cnic_dev
*dev
, int ulp_type
, u32 cid
,
1371 u32 l5_cid
, struct cnic_sock
**csk
, void *context
)
1373 struct cnic_local
*cp
= dev
->cnic_priv
;
1374 struct cnic_sock
*csk1
;
1376 if (l5_cid
>= MAX_CM_SK_TBL_SZ
)
1379 csk1
= &cp
->csk_tbl
[l5_cid
];
1380 if (atomic_read(&csk1
->ref_count
))
1383 if (test_and_set_bit(SK_F_INUSE
, &csk1
->flags
))
1388 csk1
->l5_cid
= l5_cid
;
1389 csk1
->ulp_type
= ulp_type
;
1390 csk1
->context
= context
;
1392 csk1
->ka_timeout
= DEF_KA_TIMEOUT
;
1393 csk1
->ka_interval
= DEF_KA_INTERVAL
;
1394 csk1
->ka_max_probe_count
= DEF_KA_MAX_PROBE_COUNT
;
1395 csk1
->tos
= DEF_TOS
;
1396 csk1
->ttl
= DEF_TTL
;
1397 csk1
->snd_seq_scale
= DEF_SND_SEQ_SCALE
;
1398 csk1
->rcv_buf
= DEF_RCV_BUF
;
1399 csk1
->snd_buf
= DEF_SND_BUF
;
1400 csk1
->seed
= DEF_SEED
;
1406 static void cnic_cm_cleanup(struct cnic_sock
*csk
)
1408 if (csk
->src_port
) {
1409 struct cnic_dev
*dev
= csk
->dev
;
1410 struct cnic_local
*cp
= dev
->cnic_priv
;
1412 cnic_free_id(&cp
->csk_port_tbl
, csk
->src_port
);
1417 static void cnic_close_conn(struct cnic_sock
*csk
)
1419 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
)) {
1420 cnic_cm_upload_pg(csk
);
1421 clear_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
);
1423 cnic_cm_cleanup(csk
);
1426 static int cnic_cm_destroy(struct cnic_sock
*csk
)
1428 if (!cnic_in_use(csk
))
1432 clear_bit(SK_F_INUSE
, &csk
->flags
);
1433 smp_mb__after_clear_bit();
1434 while (atomic_read(&csk
->ref_count
) != 1)
1436 cnic_cm_cleanup(csk
);
1443 static inline u16
cnic_get_vlan(struct net_device
*dev
,
1444 struct net_device
**vlan_dev
)
1446 if (dev
->priv_flags
& IFF_802_1Q_VLAN
) {
1447 *vlan_dev
= vlan_dev_real_dev(dev
);
1448 return vlan_dev_vlan_id(dev
);
1454 static int cnic_get_v4_route(struct sockaddr_in
*dst_addr
,
1455 struct dst_entry
**dst
)
1457 #if defined(CONFIG_INET)
1462 memset(&fl
, 0, sizeof(fl
));
1463 fl
.nl_u
.ip4_u
.daddr
= dst_addr
->sin_addr
.s_addr
;
1465 err
= ip_route_output_key(&init_net
, &rt
, &fl
);
1470 return -ENETUNREACH
;
1474 static int cnic_get_v6_route(struct sockaddr_in6
*dst_addr
,
1475 struct dst_entry
**dst
)
1477 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1480 memset(&fl
, 0, sizeof(fl
));
1481 ipv6_addr_copy(&fl
.fl6_dst
, &dst_addr
->sin6_addr
);
1482 if (ipv6_addr_type(&fl
.fl6_dst
) & IPV6_ADDR_LINKLOCAL
)
1483 fl
.oif
= dst_addr
->sin6_scope_id
;
1485 *dst
= ip6_route_output(&init_net
, NULL
, &fl
);
1490 return -ENETUNREACH
;
1493 static struct cnic_dev
*cnic_cm_select_dev(struct sockaddr_in
*dst_addr
,
1496 struct cnic_dev
*dev
= NULL
;
1497 struct dst_entry
*dst
;
1498 struct net_device
*netdev
= NULL
;
1499 int err
= -ENETUNREACH
;
1501 if (dst_addr
->sin_family
== AF_INET
)
1502 err
= cnic_get_v4_route(dst_addr
, &dst
);
1503 else if (dst_addr
->sin_family
== AF_INET6
) {
1504 struct sockaddr_in6
*dst_addr6
=
1505 (struct sockaddr_in6
*) dst_addr
;
1507 err
= cnic_get_v6_route(dst_addr6
, &dst
);
1517 cnic_get_vlan(dst
->dev
, &netdev
);
1519 dev
= cnic_from_netdev(netdev
);
1528 static int cnic_resolve_addr(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1530 struct cnic_dev
*dev
= csk
->dev
;
1531 struct cnic_local
*cp
= dev
->cnic_priv
;
1533 return cnic_send_nlmsg(cp
, ISCSI_KEVENT_PATH_REQ
, csk
);
1536 static int cnic_get_route(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1538 struct cnic_dev
*dev
= csk
->dev
;
1539 struct cnic_local
*cp
= dev
->cnic_priv
;
1540 int is_v6
, err
, rc
= -ENETUNREACH
;
1541 struct dst_entry
*dst
;
1542 struct net_device
*realdev
;
1545 if (saddr
->local
.v6
.sin6_family
== AF_INET6
&&
1546 saddr
->remote
.v6
.sin6_family
== AF_INET6
)
1548 else if (saddr
->local
.v4
.sin_family
== AF_INET
&&
1549 saddr
->remote
.v4
.sin_family
== AF_INET
)
1554 clear_bit(SK_F_IPV6
, &csk
->flags
);
1557 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1558 set_bit(SK_F_IPV6
, &csk
->flags
);
1559 err
= cnic_get_v6_route(&saddr
->remote
.v6
, &dst
);
1563 if (!dst
|| dst
->error
|| !dst
->dev
)
1566 memcpy(&csk
->dst_ip
[0], &saddr
->remote
.v6
.sin6_addr
,
1567 sizeof(struct in6_addr
));
1568 csk
->dst_port
= saddr
->remote
.v6
.sin6_port
;
1569 local_port
= saddr
->local
.v6
.sin6_port
;
1575 err
= cnic_get_v4_route(&saddr
->remote
.v4
, &dst
);
1579 if (!dst
|| dst
->error
|| !dst
->dev
)
1582 csk
->dst_ip
[0] = saddr
->remote
.v4
.sin_addr
.s_addr
;
1583 csk
->dst_port
= saddr
->remote
.v4
.sin_port
;
1584 local_port
= saddr
->local
.v4
.sin_port
;
1587 csk
->vlan_id
= cnic_get_vlan(dst
->dev
, &realdev
);
1588 if (realdev
!= dev
->netdev
)
1591 if (local_port
>= CNIC_LOCAL_PORT_MIN
&&
1592 local_port
< CNIC_LOCAL_PORT_MAX
) {
1593 if (cnic_alloc_id(&cp
->csk_port_tbl
, local_port
))
1599 local_port
= cnic_alloc_new_id(&cp
->csk_port_tbl
);
1600 if (local_port
== -1) {
1605 csk
->src_port
= local_port
;
1607 csk
->mtu
= dst_mtu(dst
);
1615 static void cnic_init_csk_state(struct cnic_sock
*csk
)
1618 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1619 clear_bit(SK_F_CLOSING
, &csk
->flags
);
1622 static int cnic_cm_connect(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1626 if (!cnic_in_use(csk
))
1629 if (test_and_set_bit(SK_F_CONNECT_START
, &csk
->flags
))
1632 cnic_init_csk_state(csk
);
1634 err
= cnic_get_route(csk
, saddr
);
1638 err
= cnic_resolve_addr(csk
, saddr
);
1643 clear_bit(SK_F_CONNECT_START
, &csk
->flags
);
1647 static int cnic_cm_abort(struct cnic_sock
*csk
)
1649 struct cnic_local
*cp
= csk
->dev
->cnic_priv
;
1652 if (!cnic_in_use(csk
))
1655 if (cnic_abort_prep(csk
))
1656 return cnic_cm_abort_req(csk
);
1658 /* Getting here means that we haven't started connect, or
1659 * connect was not successful.
1662 csk
->state
= L4_KCQE_OPCODE_VALUE_RESET_COMP
;
1663 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
1664 opcode
= csk
->state
;
1666 opcode
= L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD
;
1667 cp
->close_conn(csk
, opcode
);
1672 static int cnic_cm_close(struct cnic_sock
*csk
)
1674 if (!cnic_in_use(csk
))
1677 if (cnic_close_prep(csk
)) {
1678 csk
->state
= L4_KCQE_OPCODE_VALUE_CLOSE_COMP
;
1679 return cnic_cm_close_req(csk
);
1684 static void cnic_cm_upcall(struct cnic_local
*cp
, struct cnic_sock
*csk
,
1687 struct cnic_ulp_ops
*ulp_ops
;
1688 int ulp_type
= csk
->ulp_type
;
1691 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
1693 if (opcode
== L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
)
1694 ulp_ops
->cm_connect_complete(csk
);
1695 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)
1696 ulp_ops
->cm_close_complete(csk
);
1697 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
)
1698 ulp_ops
->cm_remote_abort(csk
);
1699 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_COMP
)
1700 ulp_ops
->cm_abort_complete(csk
);
1701 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED
)
1702 ulp_ops
->cm_remote_close(csk
);
1707 static int cnic_cm_set_pg(struct cnic_sock
*csk
)
1709 if (cnic_offld_prep(csk
)) {
1710 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
1711 cnic_cm_update_pg(csk
);
1713 cnic_cm_offload_pg(csk
);
1718 static void cnic_cm_process_offld_pg(struct cnic_dev
*dev
, struct l4_kcq
*kcqe
)
1720 struct cnic_local
*cp
= dev
->cnic_priv
;
1721 u32 l5_cid
= kcqe
->pg_host_opaque
;
1722 u8 opcode
= kcqe
->op_code
;
1723 struct cnic_sock
*csk
= &cp
->csk_tbl
[l5_cid
];
1726 if (!cnic_in_use(csk
))
1729 if (opcode
== L4_KCQE_OPCODE_VALUE_UPDATE_PG
) {
1730 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1733 csk
->pg_cid
= kcqe
->pg_cid
;
1734 set_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
);
1735 cnic_cm_conn_req(csk
);
1741 static void cnic_cm_process_kcqe(struct cnic_dev
*dev
, struct kcqe
*kcqe
)
1743 struct cnic_local
*cp
= dev
->cnic_priv
;
1744 struct l4_kcq
*l4kcqe
= (struct l4_kcq
*) kcqe
;
1745 u8 opcode
= l4kcqe
->op_code
;
1747 struct cnic_sock
*csk
;
1749 if (opcode
== L4_KCQE_OPCODE_VALUE_OFFLOAD_PG
||
1750 opcode
== L4_KCQE_OPCODE_VALUE_UPDATE_PG
) {
1751 cnic_cm_process_offld_pg(dev
, l4kcqe
);
1755 l5_cid
= l4kcqe
->conn_id
;
1757 l5_cid
= l4kcqe
->cid
;
1758 if (l5_cid
>= MAX_CM_SK_TBL_SZ
)
1761 csk
= &cp
->csk_tbl
[l5_cid
];
1764 if (!cnic_in_use(csk
)) {
1770 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
:
1771 if (l4kcqe
->status
== 0)
1772 set_bit(SK_F_OFFLD_COMPLETE
, &csk
->flags
);
1774 smp_mb__before_clear_bit();
1775 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1776 cnic_cm_upcall(cp
, csk
, opcode
);
1779 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
:
1780 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE
, &csk
->flags
))
1781 csk
->state
= opcode
;
1783 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP
:
1784 case L4_KCQE_OPCODE_VALUE_RESET_COMP
:
1785 cp
->close_conn(csk
, opcode
);
1788 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED
:
1789 cnic_cm_upcall(cp
, csk
, opcode
);
1795 static void cnic_cm_indicate_kcqe(void *data
, struct kcqe
*kcqe
[], u32 num
)
1797 struct cnic_dev
*dev
= data
;
1800 for (i
= 0; i
< num
; i
++)
1801 cnic_cm_process_kcqe(dev
, kcqe
[i
]);
1804 static struct cnic_ulp_ops cm_ulp_ops
= {
1805 .indicate_kcqes
= cnic_cm_indicate_kcqe
,
1808 static void cnic_cm_free_mem(struct cnic_dev
*dev
)
1810 struct cnic_local
*cp
= dev
->cnic_priv
;
1814 cnic_free_id_tbl(&cp
->csk_port_tbl
);
1817 static int cnic_cm_alloc_mem(struct cnic_dev
*dev
)
1819 struct cnic_local
*cp
= dev
->cnic_priv
;
1821 cp
->csk_tbl
= kzalloc(sizeof(struct cnic_sock
) * MAX_CM_SK_TBL_SZ
,
1826 if (cnic_init_id_tbl(&cp
->csk_port_tbl
, CNIC_LOCAL_PORT_RANGE
,
1827 CNIC_LOCAL_PORT_MIN
)) {
1828 cnic_cm_free_mem(dev
);
1834 static int cnic_ready_to_close(struct cnic_sock
*csk
, u32 opcode
)
1836 if ((opcode
== csk
->state
) ||
1837 (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
&&
1838 csk
->state
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)) {
1839 if (!test_and_set_bit(SK_F_CLOSING
, &csk
->flags
))
1845 static void cnic_close_bnx2_conn(struct cnic_sock
*csk
, u32 opcode
)
1847 struct cnic_dev
*dev
= csk
->dev
;
1848 struct cnic_local
*cp
= dev
->cnic_priv
;
1850 clear_bit(SK_F_CONNECT_START
, &csk
->flags
);
1851 if (cnic_ready_to_close(csk
, opcode
)) {
1852 cnic_close_conn(csk
);
1853 cnic_cm_upcall(cp
, csk
, opcode
);
1857 static void cnic_cm_stop_bnx2_hw(struct cnic_dev
*dev
)
1861 static int cnic_cm_init_bnx2_hw(struct cnic_dev
*dev
)
1865 get_random_bytes(&seed
, 4);
1866 cnic_ctx_wr(dev
, 45, 0, seed
);
1870 static int cnic_cm_open(struct cnic_dev
*dev
)
1872 struct cnic_local
*cp
= dev
->cnic_priv
;
1875 err
= cnic_cm_alloc_mem(dev
);
1879 err
= cp
->start_cm(dev
);
1884 dev
->cm_create
= cnic_cm_create
;
1885 dev
->cm_destroy
= cnic_cm_destroy
;
1886 dev
->cm_connect
= cnic_cm_connect
;
1887 dev
->cm_abort
= cnic_cm_abort
;
1888 dev
->cm_close
= cnic_cm_close
;
1889 dev
->cm_select_dev
= cnic_cm_select_dev
;
1891 cp
->ulp_handle
[CNIC_ULP_L4
] = dev
;
1892 rcu_assign_pointer(cp
->ulp_ops
[CNIC_ULP_L4
], &cm_ulp_ops
);
1896 cnic_cm_free_mem(dev
);
1900 static int cnic_cm_shutdown(struct cnic_dev
*dev
)
1902 struct cnic_local
*cp
= dev
->cnic_priv
;
1910 for (i
= 0; i
< MAX_CM_SK_TBL_SZ
; i
++) {
1911 struct cnic_sock
*csk
= &cp
->csk_tbl
[i
];
1913 clear_bit(SK_F_INUSE
, &csk
->flags
);
1914 cnic_cm_cleanup(csk
);
1916 cnic_cm_free_mem(dev
);
1921 static void cnic_init_context(struct cnic_dev
*dev
, u32 cid
)
1923 struct cnic_local
*cp
= dev
->cnic_priv
;
1927 if (CHIP_NUM(cp
) == CHIP_NUM_5709
)
1930 cid_addr
= GET_CID_ADDR(cid
);
1932 for (i
= 0; i
< CTX_SIZE
; i
+= 4)
1933 cnic_ctx_wr(dev
, cid_addr
, i
, 0);
1936 static int cnic_setup_5709_context(struct cnic_dev
*dev
, int valid
)
1938 struct cnic_local
*cp
= dev
->cnic_priv
;
1940 u32 valid_bit
= valid
? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID
: 0;
1942 if (CHIP_NUM(cp
) != CHIP_NUM_5709
)
1945 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
1947 u32 idx
= cp
->ctx_arr
[i
].cid
/ cp
->cids_per_blk
;
1950 memset(cp
->ctx_arr
[i
].ctx
, 0, BCM_PAGE_SIZE
);
1952 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_DATA0
,
1953 (cp
->ctx_arr
[i
].mapping
& 0xffffffff) | valid_bit
);
1954 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_DATA1
,
1955 (u64
) cp
->ctx_arr
[i
].mapping
>> 32);
1956 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_CTRL
, idx
|
1957 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
);
1958 for (j
= 0; j
< 10; j
++) {
1960 val
= CNIC_RD(dev
, BNX2_CTX_HOST_PAGE_TBL_CTRL
);
1961 if (!(val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
))
1965 if (val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
) {
1973 static void cnic_free_irq(struct cnic_dev
*dev
)
1975 struct cnic_local
*cp
= dev
->cnic_priv
;
1976 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
1978 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
1979 cp
->disable_int_sync(dev
);
1980 tasklet_disable(&cp
->cnic_irq_task
);
1981 free_irq(ethdev
->irq_arr
[0].vector
, dev
);
1985 static int cnic_init_bnx2_irq(struct cnic_dev
*dev
)
1987 struct cnic_local
*cp
= dev
->cnic_priv
;
1988 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
1990 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
1992 int sblk_num
= cp
->status_blk_num
;
1993 u32 base
= ((sblk_num
- 1) * BNX2_HC_SB_CONFIG_SIZE
) +
1994 BNX2_HC_SB_CONFIG_1
;
1996 CNIC_WR(dev
, base
, BNX2_HC_SB_CONFIG_1_ONE_SHOT
);
1998 CNIC_WR(dev
, base
+ BNX2_HC_COMP_PROD_TRIP_OFF
, (2 << 16) | 8);
1999 CNIC_WR(dev
, base
+ BNX2_HC_COM_TICKS_OFF
, (64 << 16) | 220);
2000 CNIC_WR(dev
, base
+ BNX2_HC_CMD_TICKS_OFF
, (64 << 16) | 220);
2002 cp
->bnx2_status_blk
= cp
->status_blk
;
2003 cp
->last_status_idx
= cp
->bnx2_status_blk
->status_idx
;
2004 tasklet_init(&cp
->cnic_irq_task
, &cnic_service_bnx2_msix
,
2005 (unsigned long) dev
);
2006 err
= request_irq(ethdev
->irq_arr
[0].vector
, cnic_irq
, 0,
2009 tasklet_disable(&cp
->cnic_irq_task
);
2012 while (cp
->bnx2_status_blk
->status_completion_producer_index
&&
2014 CNIC_WR(dev
, BNX2_HC_COALESCE_NOW
,
2015 1 << (11 + sblk_num
));
2020 if (cp
->bnx2_status_blk
->status_completion_producer_index
) {
2026 struct status_block
*sblk
= cp
->status_blk
;
2027 u32 hc_cmd
= CNIC_RD(dev
, BNX2_HC_COMMAND
);
2030 while (sblk
->status_completion_producer_index
&& i
< 10) {
2031 CNIC_WR(dev
, BNX2_HC_COMMAND
,
2032 hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
2037 if (sblk
->status_completion_producer_index
)
2044 printk(KERN_ERR PFX
"%s: " "KCQ index not resetting to 0.\n",
2049 static void cnic_enable_bnx2_int(struct cnic_dev
*dev
)
2051 struct cnic_local
*cp
= dev
->cnic_priv
;
2052 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2054 if (!(ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
))
2057 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
2058 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| cp
->last_status_idx
);
2061 static void cnic_disable_bnx2_int_sync(struct cnic_dev
*dev
)
2063 struct cnic_local
*cp
= dev
->cnic_priv
;
2064 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2066 if (!(ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
))
2069 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
2070 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
2071 CNIC_RD(dev
, BNX2_PCICFG_INT_ACK_CMD
);
2072 synchronize_irq(ethdev
->irq_arr
[0].vector
);
2075 static void cnic_init_bnx2_tx_ring(struct cnic_dev
*dev
)
2077 struct cnic_local
*cp
= dev
->cnic_priv
;
2078 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2079 u32 cid_addr
, tx_cid
, sb_id
;
2080 u32 val
, offset0
, offset1
, offset2
, offset3
;
2084 struct status_block
*s_blk
= cp
->status_blk
;
2086 sb_id
= cp
->status_blk_num
;
2088 cnic_init_context(dev
, tx_cid
);
2089 cnic_init_context(dev
, tx_cid
+ 1);
2090 cp
->tx_cons_ptr
= &s_blk
->status_tx_quick_consumer_index2
;
2091 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2092 struct status_block_msix
*sblk
= cp
->status_blk
;
2094 tx_cid
= TX_TSS_CID
+ sb_id
- 1;
2095 cnic_init_context(dev
, tx_cid
);
2096 CNIC_WR(dev
, BNX2_TSCH_TSS_CFG
, (sb_id
<< 24) |
2098 cp
->tx_cons_ptr
= &sblk
->status_tx_quick_consumer_index
;
2100 cp
->tx_cons
= *cp
->tx_cons_ptr
;
2102 cid_addr
= GET_CID_ADDR(tx_cid
);
2103 if (CHIP_NUM(cp
) == CHIP_NUM_5709
) {
2104 u32 cid_addr2
= GET_CID_ADDR(tx_cid
+ 4) + 0x40;
2106 for (i
= 0; i
< PHY_CTX_SIZE
; i
+= 4)
2107 cnic_ctx_wr(dev
, cid_addr2
, i
, 0);
2109 offset0
= BNX2_L2CTX_TYPE_XI
;
2110 offset1
= BNX2_L2CTX_CMD_TYPE_XI
;
2111 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI_XI
;
2112 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO_XI
;
2114 offset0
= BNX2_L2CTX_TYPE
;
2115 offset1
= BNX2_L2CTX_CMD_TYPE
;
2116 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI
;
2117 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO
;
2119 val
= BNX2_L2CTX_TYPE_TYPE_L2
| BNX2_L2CTX_TYPE_SIZE_L2
;
2120 cnic_ctx_wr(dev
, cid_addr
, offset0
, val
);
2122 val
= BNX2_L2CTX_CMD_TYPE_TYPE_L2
| (8 << 16);
2123 cnic_ctx_wr(dev
, cid_addr
, offset1
, val
);
2125 txbd
= (struct tx_bd
*) cp
->l2_ring
;
2127 buf_map
= cp
->l2_buf_map
;
2128 for (i
= 0; i
< MAX_TX_DESC_CNT
; i
++, txbd
++) {
2129 txbd
->tx_bd_haddr_hi
= (u64
) buf_map
>> 32;
2130 txbd
->tx_bd_haddr_lo
= (u64
) buf_map
& 0xffffffff;
2132 val
= (u64
) cp
->l2_ring_map
>> 32;
2133 cnic_ctx_wr(dev
, cid_addr
, offset2
, val
);
2134 txbd
->tx_bd_haddr_hi
= val
;
2136 val
= (u64
) cp
->l2_ring_map
& 0xffffffff;
2137 cnic_ctx_wr(dev
, cid_addr
, offset3
, val
);
2138 txbd
->tx_bd_haddr_lo
= val
;
2141 static void cnic_init_bnx2_rx_ring(struct cnic_dev
*dev
)
2143 struct cnic_local
*cp
= dev
->cnic_priv
;
2144 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2145 u32 cid_addr
, sb_id
, val
, coal_reg
, coal_val
;
2148 struct status_block
*s_blk
= cp
->status_blk
;
2150 sb_id
= cp
->status_blk_num
;
2151 cnic_init_context(dev
, 2);
2152 cp
->rx_cons_ptr
= &s_blk
->status_rx_quick_consumer_index2
;
2153 coal_reg
= BNX2_HC_COMMAND
;
2154 coal_val
= CNIC_RD(dev
, coal_reg
);
2155 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2156 struct status_block_msix
*sblk
= cp
->status_blk
;
2158 cp
->rx_cons_ptr
= &sblk
->status_rx_quick_consumer_index
;
2159 coal_reg
= BNX2_HC_COALESCE_NOW
;
2160 coal_val
= 1 << (11 + sb_id
);
2163 while (!(*cp
->rx_cons_ptr
!= 0) && i
< 10) {
2164 CNIC_WR(dev
, coal_reg
, coal_val
);
2169 cp
->rx_cons
= *cp
->rx_cons_ptr
;
2171 cid_addr
= GET_CID_ADDR(2);
2172 val
= BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE
|
2173 BNX2_L2CTX_CTX_TYPE_SIZE_L2
| (0x02 << 8);
2174 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_CTX_TYPE
, val
);
2177 val
= 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT
;
2179 val
= BNX2_L2CTX_STATUSB_NUM(sb_id
);
2180 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_HOST_BDIDX
, val
);
2182 rxbd
= (struct rx_bd
*) (cp
->l2_ring
+ BCM_PAGE_SIZE
);
2183 for (i
= 0; i
< MAX_RX_DESC_CNT
; i
++, rxbd
++) {
2185 int n
= (i
% cp
->l2_rx_ring_size
) + 1;
2187 buf_map
= cp
->l2_buf_map
+ (n
* cp
->l2_single_buf_size
);
2188 rxbd
->rx_bd_len
= cp
->l2_single_buf_size
;
2189 rxbd
->rx_bd_flags
= RX_BD_FLAGS_START
| RX_BD_FLAGS_END
;
2190 rxbd
->rx_bd_haddr_hi
= (u64
) buf_map
>> 32;
2191 rxbd
->rx_bd_haddr_lo
= (u64
) buf_map
& 0xffffffff;
2193 val
= (u64
) (cp
->l2_ring_map
+ BCM_PAGE_SIZE
) >> 32;
2194 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_NX_BDHADDR_HI
, val
);
2195 rxbd
->rx_bd_haddr_hi
= val
;
2197 val
= (u64
) (cp
->l2_ring_map
+ BCM_PAGE_SIZE
) & 0xffffffff;
2198 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_NX_BDHADDR_LO
, val
);
2199 rxbd
->rx_bd_haddr_lo
= val
;
2201 val
= cnic_reg_rd_ind(dev
, BNX2_RXP_SCRATCH_RXP_FLOOD
);
2202 cnic_reg_wr_ind(dev
, BNX2_RXP_SCRATCH_RXP_FLOOD
, val
| (1 << 2));
2205 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev
*dev
)
2207 struct kwqe
*wqes
[1], l2kwqe
;
2209 memset(&l2kwqe
, 0, sizeof(l2kwqe
));
2211 l2kwqe
.kwqe_op_flag
= (L2_LAYER_CODE
<< KWQE_FLAGS_LAYER_SHIFT
) |
2212 (L2_KWQE_OPCODE_VALUE_FLUSH
<<
2213 KWQE_OPCODE_SHIFT
) | 2;
2214 dev
->submit_kwqes(dev
, wqes
, 1);
2217 static void cnic_set_bnx2_mac(struct cnic_dev
*dev
)
2219 struct cnic_local
*cp
= dev
->cnic_priv
;
2222 val
= cp
->func
<< 2;
2224 cp
->shmem_base
= cnic_reg_rd_ind(dev
, BNX2_SHM_HDR_ADDR_0
+ val
);
2226 val
= cnic_reg_rd_ind(dev
, cp
->shmem_base
+
2227 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER
);
2228 dev
->mac_addr
[0] = (u8
) (val
>> 8);
2229 dev
->mac_addr
[1] = (u8
) val
;
2231 CNIC_WR(dev
, BNX2_EMAC_MAC_MATCH4
, val
);
2233 val
= cnic_reg_rd_ind(dev
, cp
->shmem_base
+
2234 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER
);
2235 dev
->mac_addr
[2] = (u8
) (val
>> 24);
2236 dev
->mac_addr
[3] = (u8
) (val
>> 16);
2237 dev
->mac_addr
[4] = (u8
) (val
>> 8);
2238 dev
->mac_addr
[5] = (u8
) val
;
2240 CNIC_WR(dev
, BNX2_EMAC_MAC_MATCH5
, val
);
2242 val
= 4 | BNX2_RPM_SORT_USER2_BC_EN
;
2243 if (CHIP_NUM(cp
) != CHIP_NUM_5709
)
2244 val
|= BNX2_RPM_SORT_USER2_PROM_VLAN
;
2246 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, 0x0);
2247 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, val
);
2248 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, val
| BNX2_RPM_SORT_USER2_ENA
);
2251 static int cnic_start_bnx2_hw(struct cnic_dev
*dev
)
2253 struct cnic_local
*cp
= dev
->cnic_priv
;
2254 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2255 struct status_block
*sblk
= cp
->status_blk
;
2259 cnic_set_bnx2_mac(dev
);
2261 val
= CNIC_RD(dev
, BNX2_MQ_CONFIG
);
2262 val
&= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE
;
2263 if (BCM_PAGE_BITS
> 12)
2264 val
|= (12 - 8) << 4;
2266 val
|= (BCM_PAGE_BITS
- 8) << 4;
2268 CNIC_WR(dev
, BNX2_MQ_CONFIG
, val
);
2270 CNIC_WR(dev
, BNX2_HC_COMP_PROD_TRIP
, (2 << 16) | 8);
2271 CNIC_WR(dev
, BNX2_HC_COM_TICKS
, (64 << 16) | 220);
2272 CNIC_WR(dev
, BNX2_HC_CMD_TICKS
, (64 << 16) | 220);
2274 err
= cnic_setup_5709_context(dev
, 1);
2278 cnic_init_context(dev
, KWQ_CID
);
2279 cnic_init_context(dev
, KCQ_CID
);
2281 cp
->kwq_cid_addr
= GET_CID_ADDR(KWQ_CID
);
2282 cp
->kwq_io_addr
= MB_GET_CID_ADDR(KWQ_CID
) + L5_KRNLQ_HOST_QIDX
;
2284 cp
->max_kwq_idx
= MAX_KWQ_IDX
;
2285 cp
->kwq_prod_idx
= 0;
2286 cp
->kwq_con_idx
= 0;
2287 cp
->cnic_local_flags
|= CNIC_LCL_FL_KWQ_INIT
;
2289 if (CHIP_NUM(cp
) == CHIP_NUM_5706
|| CHIP_NUM(cp
) == CHIP_NUM_5708
)
2290 cp
->kwq_con_idx_ptr
= &sblk
->status_rx_quick_consumer_index15
;
2292 cp
->kwq_con_idx_ptr
= &sblk
->status_cmd_consumer_index
;
2294 /* Initialize the kernel work queue context. */
2295 val
= KRNLQ_TYPE_TYPE_KRNLQ
| KRNLQ_SIZE_TYPE_SIZE
|
2296 (BCM_PAGE_BITS
- 8) | KRNLQ_FLAGS_QE_SELF_SEQ
;
2297 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_TYPE
, val
);
2299 val
= (BCM_PAGE_SIZE
/ sizeof(struct kwqe
) - 1) << 16;
2300 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_QE_SELF_SEQ_MAX
, val
);
2302 val
= ((BCM_PAGE_SIZE
/ sizeof(struct kwqe
)) << 16) | KWQ_PAGE_CNT
;
2303 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_PGTBL_NPAGES
, val
);
2305 val
= (u32
) ((u64
) cp
->kwq_info
.pgtbl_map
>> 32);
2306 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_PGTBL_HADDR_HI
, val
);
2308 val
= (u32
) cp
->kwq_info
.pgtbl_map
;
2309 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_PGTBL_HADDR_LO
, val
);
2311 cp
->kcq_cid_addr
= GET_CID_ADDR(KCQ_CID
);
2312 cp
->kcq_io_addr
= MB_GET_CID_ADDR(KCQ_CID
) + L5_KRNLQ_HOST_QIDX
;
2314 cp
->kcq_prod_idx
= 0;
2316 /* Initialize the kernel complete queue context. */
2317 val
= KRNLQ_TYPE_TYPE_KRNLQ
| KRNLQ_SIZE_TYPE_SIZE
|
2318 (BCM_PAGE_BITS
- 8) | KRNLQ_FLAGS_QE_SELF_SEQ
;
2319 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_TYPE
, val
);
2321 val
= (BCM_PAGE_SIZE
/ sizeof(struct kcqe
) - 1) << 16;
2322 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_QE_SELF_SEQ_MAX
, val
);
2324 val
= ((BCM_PAGE_SIZE
/ sizeof(struct kcqe
)) << 16) | KCQ_PAGE_CNT
;
2325 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_PGTBL_NPAGES
, val
);
2327 val
= (u32
) ((u64
) cp
->kcq_info
.pgtbl_map
>> 32);
2328 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_PGTBL_HADDR_HI
, val
);
2330 val
= (u32
) cp
->kcq_info
.pgtbl_map
;
2331 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_PGTBL_HADDR_LO
, val
);
2334 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2335 u32 sb_id
= cp
->status_blk_num
;
2336 u32 sb
= BNX2_L2CTX_STATUSB_NUM(sb_id
);
2338 cp
->int_num
= sb_id
<< BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT
;
2339 cnic_ctx_wr(dev
, cp
->kwq_cid_addr
, L5_KRNLQ_HOST_QIDX
, sb
);
2340 cnic_ctx_wr(dev
, cp
->kcq_cid_addr
, L5_KRNLQ_HOST_QIDX
, sb
);
/* Enable Command Scheduler notification when we write to the
 * host producer index of the kernel contexts. */
2345 CNIC_WR(dev
, BNX2_MQ_KNL_CMD_MASK1
, 2);
2347 /* Enable Command Scheduler notification when we write to either
2348 * the Send Queue or Receive Queue producer indexes of the kernel
2349 * bypass contexts. */
2350 CNIC_WR(dev
, BNX2_MQ_KNL_BYP_CMD_MASK1
, 7);
2351 CNIC_WR(dev
, BNX2_MQ_KNL_BYP_WRITE_MASK1
, 7);
2353 /* Notify COM when the driver post an application buffer. */
2354 CNIC_WR(dev
, BNX2_MQ_KNL_RX_V2P_MASK2
, 0x2000);
/* Set the CP and COM doorbells.  These two processors poll the
 * doorbell for a non-zero value before running.  This must be done
 * after setting up the kernel queue contexts. */
2359 cnic_reg_wr_ind(dev
, BNX2_CP_SCRATCH
+ 0x20, 1);
2360 cnic_reg_wr_ind(dev
, BNX2_COM_SCRATCH
+ 0x20, 1);
2362 cnic_init_bnx2_tx_ring(dev
);
2363 cnic_init_bnx2_rx_ring(dev
);
2365 err
= cnic_init_bnx2_irq(dev
);
2367 printk(KERN_ERR PFX
"%s: cnic_init_irq failed\n",
2369 cnic_reg_wr_ind(dev
, BNX2_CP_SCRATCH
+ 0x20, 0);
2370 cnic_reg_wr_ind(dev
, BNX2_COM_SCRATCH
+ 0x20, 0);
2377 static int cnic_start_hw(struct cnic_dev
*dev
)
2379 struct cnic_local
*cp
= dev
->cnic_priv
;
2380 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2383 if (test_bit(CNIC_F_CNIC_UP
, &dev
->flags
))
2386 err
= ethdev
->drv_register_cnic(dev
->netdev
, cp
->cnic_ops
, dev
);
2388 printk(KERN_ERR PFX
"%s: register_cnic failed\n",
2393 dev
->regview
= ethdev
->io_base
;
2394 cp
->chip_id
= ethdev
->chip_id
;
2395 pci_dev_get(dev
->pcidev
);
2396 cp
->func
= PCI_FUNC(dev
->pcidev
->devfn
);
2397 cp
->status_blk
= ethdev
->irq_arr
[0].status_blk
;
2398 cp
->status_blk_num
= ethdev
->irq_arr
[0].status_blk_num
;
2400 err
= cp
->alloc_resc(dev
);
2402 printk(KERN_ERR PFX
"%s: allocate resource failure\n",
2407 err
= cp
->start_hw(dev
);
2411 err
= cnic_cm_open(dev
);
2415 set_bit(CNIC_F_CNIC_UP
, &dev
->flags
);
2417 cp
->enable_int(dev
);
2422 ethdev
->drv_unregister_cnic(dev
->netdev
);
2424 pci_dev_put(dev
->pcidev
);
2429 static void cnic_stop_bnx2_hw(struct cnic_dev
*dev
)
2431 struct cnic_local
*cp
= dev
->cnic_priv
;
2432 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2434 cnic_disable_bnx2_int_sync(dev
);
2436 cnic_reg_wr_ind(dev
, BNX2_CP_SCRATCH
+ 0x20, 0);
2437 cnic_reg_wr_ind(dev
, BNX2_COM_SCRATCH
+ 0x20, 0);
2439 cnic_init_context(dev
, KWQ_CID
);
2440 cnic_init_context(dev
, KCQ_CID
);
2442 cnic_setup_5709_context(dev
, 0);
2445 ethdev
->drv_unregister_cnic(dev
->netdev
);
2447 cnic_free_resc(dev
);
2450 static void cnic_stop_hw(struct cnic_dev
*dev
)
2452 if (test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)) {
2453 struct cnic_local
*cp
= dev
->cnic_priv
;
2455 clear_bit(CNIC_F_CNIC_UP
, &dev
->flags
);
2456 rcu_assign_pointer(cp
->ulp_ops
[CNIC_ULP_L4
], NULL
);
2458 cnic_cm_shutdown(dev
);
2460 pci_dev_put(dev
->pcidev
);
2464 static void cnic_free_dev(struct cnic_dev
*dev
)
2468 while ((atomic_read(&dev
->ref_count
) != 0) && i
< 10) {
2472 if (atomic_read(&dev
->ref_count
) != 0)
2473 printk(KERN_ERR PFX
"%s: Failed waiting for ref count to go"
2474 " to zero.\n", dev
->netdev
->name
);
2476 printk(KERN_INFO PFX
"Removed CNIC device: %s\n", dev
->netdev
->name
);
2477 dev_put(dev
->netdev
);
2481 static struct cnic_dev
*cnic_alloc_dev(struct net_device
*dev
,
2482 struct pci_dev
*pdev
)
2484 struct cnic_dev
*cdev
;
2485 struct cnic_local
*cp
;
2488 alloc_size
= sizeof(struct cnic_dev
) + sizeof(struct cnic_local
);
2490 cdev
= kzalloc(alloc_size
, GFP_KERNEL
);
2492 printk(KERN_ERR PFX
"%s: allocate dev struct failure\n",
2498 cdev
->cnic_priv
= (char *)cdev
+ sizeof(struct cnic_dev
);
2499 cdev
->register_device
= cnic_register_device
;
2500 cdev
->unregister_device
= cnic_unregister_device
;
2501 cdev
->iscsi_nl_msg_recv
= cnic_iscsi_nl_msg_recv
;
2503 cp
= cdev
->cnic_priv
;
2506 cp
->l2_single_buf_size
= 0x400;
2507 cp
->l2_rx_ring_size
= 3;
2509 spin_lock_init(&cp
->cnic_ulp_lock
);
2511 printk(KERN_INFO PFX
"Added CNIC device: %s\n", dev
->name
);
2516 static struct cnic_dev
*init_bnx2_cnic(struct net_device
*dev
)
2518 struct pci_dev
*pdev
;
2519 struct cnic_dev
*cdev
;
2520 struct cnic_local
*cp
;
2521 struct cnic_eth_dev
*ethdev
= NULL
;
2522 struct cnic_eth_dev
*(*probe
)(struct net_device
*) = NULL
;
2524 probe
= symbol_get(bnx2_cnic_probe
);
2526 ethdev
= (*probe
)(dev
);
2527 symbol_put_addr(probe
);
2532 pdev
= ethdev
->pdev
;
2538 if (pdev
->device
== PCI_DEVICE_ID_NX2_5709
||
2539 pdev
->device
== PCI_DEVICE_ID_NX2_5709S
) {
2542 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &rev
);
2550 cdev
= cnic_alloc_dev(dev
, pdev
);
2554 set_bit(CNIC_F_BNX2_CLASS
, &cdev
->flags
);
2555 cdev
->submit_kwqes
= cnic_submit_bnx2_kwqes
;
2557 cp
= cdev
->cnic_priv
;
2558 cp
->ethdev
= ethdev
;
2559 cdev
->pcidev
= pdev
;
2561 cp
->cnic_ops
= &cnic_bnx2_ops
;
2562 cp
->start_hw
= cnic_start_bnx2_hw
;
2563 cp
->stop_hw
= cnic_stop_bnx2_hw
;
2564 cp
->setup_pgtbl
= cnic_setup_page_tbl
;
2565 cp
->alloc_resc
= cnic_alloc_bnx2_resc
;
2566 cp
->free_resc
= cnic_free_resc
;
2567 cp
->start_cm
= cnic_cm_init_bnx2_hw
;
2568 cp
->stop_cm
= cnic_cm_stop_bnx2_hw
;
2569 cp
->enable_int
= cnic_enable_bnx2_int
;
2570 cp
->disable_int_sync
= cnic_disable_bnx2_int_sync
;
2571 cp
->close_conn
= cnic_close_bnx2_conn
;
2572 cp
->next_idx
= cnic_bnx2_next_idx
;
2573 cp
->hw_idx
= cnic_bnx2_hw_idx
;
2581 static struct cnic_dev
*is_cnic_dev(struct net_device
*dev
)
2583 struct ethtool_drvinfo drvinfo
;
2584 struct cnic_dev
*cdev
= NULL
;
2586 if (dev
->ethtool_ops
&& dev
->ethtool_ops
->get_drvinfo
) {
2587 memset(&drvinfo
, 0, sizeof(drvinfo
));
2588 dev
->ethtool_ops
->get_drvinfo(dev
, &drvinfo
);
2590 if (!strcmp(drvinfo
.driver
, "bnx2"))
2591 cdev
= init_bnx2_cnic(dev
);
2593 write_lock(&cnic_dev_lock
);
2594 list_add(&cdev
->list
, &cnic_dev_list
);
2595 write_unlock(&cnic_dev_lock
);
2602 * netdev event handler
2604 static int cnic_netdev_event(struct notifier_block
*this, unsigned long event
,
2607 struct net_device
*netdev
= ptr
;
2608 struct cnic_dev
*dev
;
2612 dev
= cnic_from_netdev(netdev
);
2614 if (!dev
&& (event
== NETDEV_REGISTER
|| event
== NETDEV_UP
)) {
2615 /* Check for the hot-plug device */
2616 dev
= is_cnic_dev(netdev
);
2623 struct cnic_local
*cp
= dev
->cnic_priv
;
2627 else if (event
== NETDEV_UNREGISTER
)
2629 else if (event
== NETDEV_UP
) {
2630 mutex_lock(&cnic_lock
);
2631 if (!cnic_start_hw(dev
))
2632 cnic_ulp_start(dev
);
2633 mutex_unlock(&cnic_lock
);
2637 for (if_type
= 0; if_type
< MAX_CNIC_ULP_TYPE
; if_type
++) {
2638 struct cnic_ulp_ops
*ulp_ops
;
2641 ulp_ops
= rcu_dereference(cp
->ulp_ops
[if_type
]);
2642 if (!ulp_ops
|| !ulp_ops
->indicate_netevent
)
2645 ctx
= cp
->ulp_handle
[if_type
];
2647 ulp_ops
->indicate_netevent(ctx
, event
);
2651 if (event
== NETDEV_GOING_DOWN
) {
2652 mutex_lock(&cnic_lock
);
2655 mutex_unlock(&cnic_lock
);
2656 } else if (event
== NETDEV_UNREGISTER
) {
2657 write_lock(&cnic_dev_lock
);
2658 list_del_init(&dev
->list
);
2659 write_unlock(&cnic_dev_lock
);
2671 static struct notifier_block cnic_netdev_notifier
= {
2672 .notifier_call
= cnic_netdev_event
2675 static void cnic_release(void)
2677 struct cnic_dev
*dev
;
2679 while (!list_empty(&cnic_dev_list
)) {
2680 dev
= list_entry(cnic_dev_list
.next
, struct cnic_dev
, list
);
2681 if (test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)) {
2687 list_del_init(&dev
->list
);
2692 static int __init
cnic_init(void)
2696 printk(KERN_INFO
"%s", version
);
2698 rc
= register_netdevice_notifier(&cnic_netdev_notifier
);
2707 static void __exit
cnic_exit(void)
2709 unregister_netdevice_notifier(&cnic_netdev_notifier
);
2714 module_init(cnic_init
);
2715 module_exit(cnic_exit
);