/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <scsi/iscsi_if.h>

#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "
static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME
	" v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	cp->uio_dev = iminor(inode);

	cnic_shutdown_bnx2_rx_ring(dev);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cp->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		break;
	}
	}

	return 0;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
		       "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (!cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
		       "been registered\n", ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
			       "still has devices registered\n", ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
		       "has not been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
		       "been registered to this device\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		printk(KERN_ERR PFX "cnic_unregister_device: device not "
		       "registered to this ulp type %d\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
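/* Note on the ID table helpers: cnic_alloc_id() claims a specific ID, while
 * cnic_alloc_new_id() below scans the bitmap round-robin starting at
 * id_tbl->next and wraps to the beginning when the tail is full.  The wrap
 * uses "& (id_tbl->max - 1)", which assumes the table size is a power of two.
 */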
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
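/* Each page table entry above occupies two consecutive 32-bit words: the
 * upper half of the DMA address first, followed by the lower half.
 */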
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
						      BCM_PAGE_SIZE,
						      &dma->pg_map_arr[i]);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
					  &dma->pgtbl_map);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
				    cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
				    cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
					    cp->ctx_arr[i].ctx,
					    cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
						     &cp->ctx_arr[i].mapping);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
					   &cp->l2_ring_map);
	if (!cp->l2_ring)
		goto error;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
					  &cp->l2_buf_map);
	if (!cp->l2_buf)
		goto error;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		goto error;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
	else
		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->name = "bnx2_cnic";
	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		goto error;
	}

	cp->cnic_uinfo = uinfo;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
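/* Note on the math above: max_kwq_idx acts as a power-of-two-minus-one mask,
 * so the masked difference between the producer and consumer indexes is the
 * number of occupied KWQ slots; the free space is the complement of that.
 */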
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
			       dev->netdev->name, kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
}
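/* service_kcqes() above dispatches completions in batches: consecutive KCQEs
 * that belong to the same protocol layer are grouped together and handed to
 * that ULP's indicate_kcqes() callback in a single call.
 */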
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}
static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = *sw_prod;
	ri &= MAX_KCQ_IDX;

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	*sw_prod = last;
	return last_cnt;
}
static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons = *cp->rx_cons_ptr;
	u16 tx_cons = *cp->tx_cons_ptr;

	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;
		uio_event_notify(cp->cnic_uinfo);
	}
}
static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);
	return status_idx;
}
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->bnx2_status_blk;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);

	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
cnic_irq(int irq
, void *dev_instance
)
1055 struct cnic_dev
*dev
= dev_instance
;
1056 struct cnic_local
*cp
= dev
->cnic_priv
;
1057 u16 prod
= cp
->kcq_prod_idx
& MAX_KCQ_IDX
;
1062 prefetch(cp
->status_blk
);
1063 prefetch(&cp
->kcq
[KCQ_PG(prod
)][KCQ_IDX(prod
)]);
1065 if (likely(test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
1066 tasklet_schedule(&cp
->cnic_irq_task
);
static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	if (cp->cnic_uinfo)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops)
			continue;

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
	}
	rcu_read_unlock();
}
static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->cnic_start)
			continue;

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
	}
	rcu_read_unlock();
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);
		mutex_lock(&cnic_lock);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		mutex_unlock(&cnic_lock);
		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);
		mutex_lock(&cnic_lock);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		mutex_unlock(&cnic_lock);
		cnic_put(dev);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	rcu_read_lock();
	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
		if (!ulp_ops || !ulp_ops->cnic_init)
			continue;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);
	}
	rcu_read_unlock();
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	rcu_read_lock();
	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
		if (!ulp_ops || !ulp_ops->cnic_exit)
			continue;

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);
	}
	rcu_read_unlock();
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_count = DEF_IPID_COUNT;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}
static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
		csk->src_port = 0;
	}
}
static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;
	struct flowi fl;
	int err;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->u.dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}
static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	}
	if (err)
		return NULL;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);
	if (dev)
		cnic_put(dev);

	dst_release(dst);
	return dev;
}
static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, err, rc = -ENETUNREACH;
	struct dst_entry *dst;
	struct net_device *realdev;
	u32 local_port;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
		set_bit(SK_F_IPV6, &csk->flags);
		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;
#else
		return rc;
#endif
	} else {
		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
	if (realdev != dev->netdev)
		goto err_out;

	if (local_port >= CNIC_LOCAL_PORT_MIN &&
	    local_port < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
			local_port = 0;
	} else
		local_port = 0;

	if (!local_port) {
		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (local_port == -1)
			goto err_out;
	}
	csk->src_port = local_port;

	csk->mtu = dst_mtu(dst);
	rc = 0;

err_out:
	dst_release(dst);
	return rc;
}
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}
*csk
)
1668 struct cnic_local
*cp
= csk
->dev
->cnic_priv
;
1671 if (!cnic_in_use(csk
))
1674 if (cnic_abort_prep(csk
))
1675 return cnic_cm_abort_req(csk
);
1677 /* Getting here means that we haven't started connect, or
1678 * connect was not successful.
1681 csk
->state
= L4_KCQE_OPCODE_VALUE_RESET_COMP
;
1682 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
1683 opcode
= csk
->state
;
1685 opcode
= L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD
;
1686 cp
->close_conn(csk
, opcode
);
static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}
static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}

	return 0;
}
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
			csk->state = opcode;
		/* fall through */
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if ((opcode == csk->state) ||
	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
			return 1;
	}
	return 0;
}
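/* cnic_ready_to_close() above accepts a RESET_RECEIVED event as a valid close
 * completion when a close was already in flight, and uses SK_F_CLOSING to
 * make sure the teardown path runs only once per socket.
 */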
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	if (cnic_ready_to_close(csk, opcode)) {
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, opcode);
	}
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}
static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);
	if (err)
		goto err_out;

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}
static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid_addr;
	int i;

	if (CHIP_NUM(cp) == CHIP_NUM_5709)
		return;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}
static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_disable(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0, i = 0;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->bnx2_status_blk = cp->status_blk;
		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err) {
			tasklet_disable(&cp->cnic_irq_task);
			return err;
		}
		while (cp->bnx2_status_blk->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->bnx2_status_blk->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
	       dev->netdev->name);
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cnic_init_context(dev, tx_cid);
	cnic_init_context(dev, tx_cid + 1);
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		tx_cid = TX_TSS_CID + sb_id - 1;
		cnic_init_context(dev, tx_cid);
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) cp->l2_ring;

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) cp->l2_ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}
*dev
)
2226 struct kwqe
*wqes
[1], l2kwqe
;
2228 memset(&l2kwqe
, 0, sizeof(l2kwqe
));
2230 l2kwqe
.kwqe_op_flag
= (L2_LAYER_CODE
<< KWQE_FLAGS_LAYER_SHIFT
) |
2231 (L2_KWQE_OPCODE_VALUE_FLUSH
<<
2232 KWQE_OPCODE_SHIFT
) | 2;
2233 dev
->submit_kwqes(dev
, wqes
, 1);
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
		       dev->netdev->name);
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err) {
		printk(KERN_ERR PFX "%s: register_cnic failed\n",
		       dev->netdev->name);
		goto err2;
	}

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: allocate resource failure\n",
		       dev->netdev->name);
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	ethdev->drv_unregister_cnic(dev->netdev);
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
err2:
	return err;
}
*dev
)
2450 struct cnic_local
*cp
= dev
->cnic_priv
;
2451 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2453 cnic_disable_bnx2_int_sync(dev
);
2455 cnic_reg_wr_ind(dev
, BNX2_CP_SCRATCH
+ 0x20, 0);
2456 cnic_reg_wr_ind(dev
, BNX2_COM_SCRATCH
+ 0x20, 0);
2458 cnic_init_context(dev
, KWQ_CID
);
2459 cnic_init_context(dev
, KCQ_CID
);
2461 cnic_setup_5709_context(dev
, 0);
2464 ethdev
->drv_unregister_cnic(dev
->netdev
);
2466 cnic_free_resc(dev
);
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);

	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
	dev_put(dev->netdev);
	kfree(dev);
}
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
		       dev->name);
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);

	return cdev;
}
*init_bnx2_cnic(struct net_device
*dev
)
2537 struct pci_dev
*pdev
;
2538 struct cnic_dev
*cdev
;
2539 struct cnic_local
*cp
;
2540 struct cnic_eth_dev
*ethdev
= NULL
;
2541 struct cnic_eth_dev
*(*probe
)(struct net_device
*) = NULL
;
2543 probe
= symbol_get(bnx2_cnic_probe
);
2545 ethdev
= (*probe
)(dev
);
2546 symbol_put_addr(probe
);
2551 pdev
= ethdev
->pdev
;
2557 if (pdev
->device
== PCI_DEVICE_ID_NX2_5709
||
2558 pdev
->device
== PCI_DEVICE_ID_NX2_5709S
) {
2561 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &rev
);
2569 cdev
= cnic_alloc_dev(dev
, pdev
);
2573 set_bit(CNIC_F_BNX2_CLASS
, &cdev
->flags
);
2574 cdev
->submit_kwqes
= cnic_submit_bnx2_kwqes
;
2576 cp
= cdev
->cnic_priv
;
2577 cp
->ethdev
= ethdev
;
2578 cdev
->pcidev
= pdev
;
2580 cp
->cnic_ops
= &cnic_bnx2_ops
;
2581 cp
->start_hw
= cnic_start_bnx2_hw
;
2582 cp
->stop_hw
= cnic_stop_bnx2_hw
;
2583 cp
->setup_pgtbl
= cnic_setup_page_tbl
;
2584 cp
->alloc_resc
= cnic_alloc_bnx2_resc
;
2585 cp
->free_resc
= cnic_free_resc
;
2586 cp
->start_cm
= cnic_cm_init_bnx2_hw
;
2587 cp
->stop_cm
= cnic_cm_stop_bnx2_hw
;
2588 cp
->enable_int
= cnic_enable_bnx2_int
;
2589 cp
->disable_int_sync
= cnic_disable_bnx2_int_sync
;
2590 cp
->close_conn
= cnic_close_bnx2_conn
;
2591 cp
->next_idx
= cnic_bnx2_next_idx
;
2592 cp
->hw_idx
= cnic_bnx2_hw_idx
;
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
	}
	if (cdev) {
		write_lock(&cnic_dev_lock);
		list_add(&cdev->list, &cnic_dev_list);
		write_unlock(&cnic_dev_lock);
	}
	return cdev;
}
/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);
		else if (event == NETDEV_UP) {
			mutex_lock(&cnic_lock);
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
			mutex_unlock(&cnic_lock);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			mutex_lock(&cnic_lock);
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			mutex_unlock(&cnic_lock);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			return NOTIFY_DONE;
		}
		cnic_put(dev);
	}
	return NOTIFY_DONE;
}
static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}
static int __init cnic_init(void)
{
	int rc = 0;

	printk(KERN_INFO "%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
}

module_init(cnic_init);
module_exit(cnic_exit);