/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME
	" v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};
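/* cnic_ulp_tbl holds the ops registered by upper layer protocol (ULP)
 * drivers such as the iSCSI driver.  Writers hold cnic_lock and publish
 * entries with rcu_assign_pointer(); readers look them up under RCU with
 * rcu_dereference() (see cnic_register_driver() and cnic_send_nlmsg()).
 */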
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -ENODEV;

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
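/* Plain atomic reference count helpers: cnic_hold()/cnic_put() pin a
 * cnic_dev (for example while it is being handed out from cnic_dev_list),
 * and csk_hold()/csk_put() do the same for a cnic_sock table entry.
 */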
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
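/* None of the accessors above touch the chip directly.  Context writes and
 * indirect register reads/writes are packaged into a struct drv_ctl_info
 * and handed to the owning ethernet driver through ethdev->drv_ctl(), which
 * performs the actual I/O.
 */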
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
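/* The *_prep() helpers above arbitrate offload, close and abort requests
 * on a cnic_sock: SK_F_OFFLD_SCHED acts as the ownership bit, so close and
 * abort wait until any in-flight offload has dropped it before changing
 * the connection state.
 */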
static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
		       "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
		       "been registered\n", ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
			       "still has devices registered\n", ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
		       "has not been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
		       "been registered to this device\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		printk(KERN_ERR PFX "cnic_unregister_device: device not "
		       "registered to this ulp type %d\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
		       " to complete.\n", dev->netdev->name);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
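/* cnic_id_tbl is a small spinlock-protected bitmap allocator.  IDs are
 * biased by 'start'; cnic_alloc_id() claims a specific ID (used for an
 * explicitly requested local TCP port, for example), while
 * cnic_alloc_new_id() hands out the next free ID round-robin from 'next'.
 */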
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i],
					    dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
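/* cnic_setup_page_tbl() below fills the chip-visible page table for a
 * cnic_dma area: each page gets an 8-byte entry holding the upper and then
 * the lower 32 bits of its DMA address, in the big endian order the
 * hardware expects.
 */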
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
						      BCM_PAGE_SIZE,
						      &dma->pg_map_arr[i]);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
					  &dma->pgtbl_map);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
					    cp->ctx_arr[i].ctx,
					    cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
703 static void cnic_free_resc(struct cnic_dev
*dev
)
705 struct cnic_local
*cp
= dev
->cnic_priv
;
708 if (cp
->cnic_uinfo
) {
709 while (cp
->uio_dev
!= -1 && i
< 15) {
713 uio_unregister_device(cp
->cnic_uinfo
);
714 kfree(cp
->cnic_uinfo
);
715 cp
->cnic_uinfo
= NULL
;
719 pci_free_consistent(dev
->pcidev
, cp
->l2_buf_size
,
720 cp
->l2_buf
, cp
->l2_buf_map
);
725 pci_free_consistent(dev
->pcidev
, cp
->l2_ring_size
,
726 cp
->l2_ring
, cp
->l2_ring_map
);
730 cnic_free_context(dev
);
735 cnic_free_dma(dev
, &cp
->gbl_buf_info
);
736 cnic_free_dma(dev
, &cp
->conn_buf_info
);
737 cnic_free_dma(dev
, &cp
->kwq_info
);
738 cnic_free_dma(dev
, &cp
->kcq_info
);
739 kfree(cp
->iscsi_tbl
);
740 cp
->iscsi_tbl
= NULL
;
744 cnic_free_id_tbl(&cp
->cid_tbl
);
747 static int cnic_alloc_context(struct cnic_dev
*dev
)
749 struct cnic_local
*cp
= dev
->cnic_priv
;
751 if (CHIP_NUM(cp
) == CHIP_NUM_5709
) {
754 cp
->ctx_blk_size
= BCM_PAGE_SIZE
;
755 cp
->cids_per_blk
= BCM_PAGE_SIZE
/ 128;
756 arr_size
= BNX2_MAX_CID
/ cp
->cids_per_blk
*
757 sizeof(struct cnic_ctx
);
758 cp
->ctx_arr
= kzalloc(arr_size
, GFP_KERNEL
);
759 if (cp
->ctx_arr
== NULL
)
763 for (i
= 0; i
< 2; i
++) {
764 u32 j
, reg
, off
, lo
, hi
;
767 off
= BNX2_PG_CTX_MAP
;
769 off
= BNX2_ISCSI_CTX_MAP
;
771 reg
= cnic_reg_rd_ind(dev
, off
);
774 for (j
= lo
; j
< hi
; j
+= cp
->cids_per_blk
, k
++)
775 cp
->ctx_arr
[k
].cid
= j
;
779 if (cp
->ctx_blks
>= (BNX2_MAX_CID
/ cp
->cids_per_blk
)) {
784 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
786 pci_alloc_consistent(dev
->pcidev
, BCM_PAGE_SIZE
,
787 &cp
->ctx_arr
[i
].mapping
);
788 if (cp
->ctx_arr
[i
].ctx
== NULL
)
795 static int cnic_alloc_l2_rings(struct cnic_dev
*dev
, int pages
)
797 struct cnic_local
*cp
= dev
->cnic_priv
;
799 cp
->l2_ring_size
= pages
* BCM_PAGE_SIZE
;
800 cp
->l2_ring
= pci_alloc_consistent(dev
->pcidev
, cp
->l2_ring_size
,
805 cp
->l2_buf_size
= (cp
->l2_rx_ring_size
+ 1) * cp
->l2_single_buf_size
;
806 cp
->l2_buf_size
= PAGE_ALIGN(cp
->l2_buf_size
);
807 cp
->l2_buf
= pci_alloc_consistent(dev
->pcidev
, cp
->l2_buf_size
,
815 static int cnic_alloc_uio(struct cnic_dev
*dev
) {
816 struct cnic_local
*cp
= dev
->cnic_priv
;
817 struct uio_info
*uinfo
;
820 uinfo
= kzalloc(sizeof(*uinfo
), GFP_ATOMIC
);
824 uinfo
->mem
[0].addr
= dev
->netdev
->base_addr
;
825 uinfo
->mem
[0].internal_addr
= dev
->regview
;
826 uinfo
->mem
[0].size
= dev
->netdev
->mem_end
- dev
->netdev
->mem_start
;
827 uinfo
->mem
[0].memtype
= UIO_MEM_PHYS
;
829 if (test_bit(CNIC_F_BNX2_CLASS
, &dev
->flags
)) {
830 uinfo
->mem
[1].addr
= (unsigned long) cp
->status_blk
& PAGE_MASK
;
831 if (cp
->ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
)
832 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
* 9;
834 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
;
836 uinfo
->name
= "bnx2_cnic";
839 uinfo
->mem
[1].memtype
= UIO_MEM_LOGICAL
;
841 uinfo
->mem
[2].addr
= (unsigned long) cp
->l2_ring
;
842 uinfo
->mem
[2].size
= cp
->l2_ring_size
;
843 uinfo
->mem
[2].memtype
= UIO_MEM_LOGICAL
;
845 uinfo
->mem
[3].addr
= (unsigned long) cp
->l2_buf
;
846 uinfo
->mem
[3].size
= cp
->l2_buf_size
;
847 uinfo
->mem
[3].memtype
= UIO_MEM_LOGICAL
;
849 uinfo
->version
= CNIC_MODULE_VERSION
;
850 uinfo
->irq
= UIO_IRQ_CUSTOM
;
852 uinfo
->open
= cnic_uio_open
;
853 uinfo
->release
= cnic_uio_close
;
857 ret
= uio_register_device(&dev
->pcidev
->dev
, uinfo
);
863 cp
->cnic_uinfo
= uinfo
;
867 static int cnic_alloc_bnx2_resc(struct cnic_dev
*dev
)
869 struct cnic_local
*cp
= dev
->cnic_priv
;
872 ret
= cnic_alloc_dma(dev
, &cp
->kwq_info
, KWQ_PAGE_CNT
, 1);
875 cp
->kwq
= (struct kwqe
**) cp
->kwq_info
.pg_arr
;
877 ret
= cnic_alloc_dma(dev
, &cp
->kcq_info
, KCQ_PAGE_CNT
, 1);
880 cp
->kcq
= (struct kcqe
**) cp
->kcq_info
.pg_arr
;
882 ret
= cnic_alloc_context(dev
);
886 ret
= cnic_alloc_l2_rings(dev
, 2);
890 ret
= cnic_alloc_uio(dev
);
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
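/* kwq_prod_idx and kwq_con_idx are free-running indices; masking their
 * difference with max_kwq_idx (the ring index mask) gives the number of
 * kernel work queue entries currently in flight, so the value returned
 * above is how many more KWQEs can still be queued.
 */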
907 static int cnic_submit_bnx2_kwqes(struct cnic_dev
*dev
, struct kwqe
*wqes
[],
910 struct cnic_local
*cp
= dev
->cnic_priv
;
911 struct kwqe
*prod_qe
;
912 u16 prod
, sw_prod
, i
;
914 if (!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
))
915 return -EAGAIN
; /* bnx2 is down */
917 spin_lock_bh(&cp
->cnic_ulp_lock
);
918 if (num_wqes
> cnic_kwq_avail(cp
) &&
919 !(cp
->cnic_local_flags
& CNIC_LCL_FL_KWQ_INIT
)) {
920 spin_unlock_bh(&cp
->cnic_ulp_lock
);
924 cp
->cnic_local_flags
&= ~CNIC_LCL_FL_KWQ_INIT
;
926 prod
= cp
->kwq_prod_idx
;
927 sw_prod
= prod
& MAX_KWQ_IDX
;
928 for (i
= 0; i
< num_wqes
; i
++) {
929 prod_qe
= &cp
->kwq
[KWQ_PG(sw_prod
)][KWQ_IDX(sw_prod
)];
930 memcpy(prod_qe
, wqes
[i
], sizeof(struct kwqe
));
932 sw_prod
= prod
& MAX_KWQ_IDX
;
934 cp
->kwq_prod_idx
= prod
;
936 CNIC_WR16(dev
, cp
->kwq_io_addr
, cp
->kwq_prod_idx
);
938 spin_unlock_bh(&cp
->cnic_ulp_lock
);
942 static void service_kcqes(struct cnic_dev
*dev
, int num_cqes
)
944 struct cnic_local
*cp
= dev
->cnic_priv
;
950 struct cnic_ulp_ops
*ulp_ops
;
952 u32 kcqe_op_flag
= cp
->completed_kcq
[i
]->kcqe_op_flag
;
953 u32 kcqe_layer
= kcqe_op_flag
& KCQE_FLAGS_LAYER_MASK
;
955 if (unlikely(kcqe_op_flag
& KCQE_RAMROD_COMPLETION
))
956 cnic_kwq_completion(dev
, 1);
958 while (j
< num_cqes
) {
959 u32 next_op
= cp
->completed_kcq
[i
+ j
]->kcqe_op_flag
;
961 if ((next_op
& KCQE_FLAGS_LAYER_MASK
) != kcqe_layer
)
964 if (unlikely(next_op
& KCQE_RAMROD_COMPLETION
))
965 cnic_kwq_completion(dev
, 1);
969 if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_RDMA
)
970 ulp_type
= CNIC_ULP_RDMA
;
971 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L5_ISCSI
)
972 ulp_type
= CNIC_ULP_ISCSI
;
973 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L4
)
974 ulp_type
= CNIC_ULP_L4
;
975 else if (kcqe_layer
== KCQE_FLAGS_LAYER_MASK_L2
)
978 printk(KERN_ERR PFX
"%s: Unknown type of KCQE(0x%x)\n",
979 dev
->netdev
->name
, kcqe_op_flag
);
984 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
985 if (likely(ulp_ops
)) {
986 ulp_ops
->indicate_kcqes(cp
->ulp_handle
[ulp_type
],
987 cp
->completed_kcq
+ i
, j
);
998 static u16
cnic_bnx2_next_idx(u16 idx
)
1003 static u16
cnic_bnx2_hw_idx(u16 idx
)
1008 static int cnic_get_kcqes(struct cnic_dev
*dev
, u16 hw_prod
, u16
*sw_prod
)
1010 struct cnic_local
*cp
= dev
->cnic_priv
;
1013 int kcqe_cnt
= 0, last_cnt
= 0;
1015 i
= ri
= last
= *sw_prod
;
1018 while ((i
!= hw_prod
) && (kcqe_cnt
< MAX_COMPLETED_KCQE
)) {
1019 kcqe
= &cp
->kcq
[KCQ_PG(ri
)][KCQ_IDX(ri
)];
1020 cp
->completed_kcq
[kcqe_cnt
++] = kcqe
;
1021 i
= cp
->next_idx(i
);
1022 ri
= i
& MAX_KCQ_IDX
;
1023 if (likely(!(kcqe
->kcqe_op_flag
& KCQE_FLAGS_NEXT
))) {
1024 last_cnt
= kcqe_cnt
;
1033 static void cnic_chk_pkt_rings(struct cnic_local
*cp
)
1035 u16 rx_cons
= *cp
->rx_cons_ptr
;
1036 u16 tx_cons
= *cp
->tx_cons_ptr
;
1038 if (cp
->tx_cons
!= tx_cons
|| cp
->rx_cons
!= rx_cons
) {
1039 cp
->tx_cons
= tx_cons
;
1040 cp
->rx_cons
= rx_cons
;
1041 uio_event_notify(cp
->cnic_uinfo
);
1045 static int cnic_service_bnx2(void *data
, void *status_blk
)
1047 struct cnic_dev
*dev
= data
;
1048 struct status_block
*sblk
= status_blk
;
1049 struct cnic_local
*cp
= dev
->cnic_priv
;
1050 u32 status_idx
= sblk
->status_idx
;
1051 u16 hw_prod
, sw_prod
;
1054 if (unlikely(!test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
1057 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
1059 hw_prod
= sblk
->status_completion_producer_index
;
1060 sw_prod
= cp
->kcq_prod_idx
;
1061 while (sw_prod
!= hw_prod
) {
1062 kcqe_cnt
= cnic_get_kcqes(dev
, hw_prod
, &sw_prod
);
1066 service_kcqes(dev
, kcqe_cnt
);
1068 /* Tell compiler that status_blk fields can change. */
1070 if (status_idx
!= sblk
->status_idx
) {
1071 status_idx
= sblk
->status_idx
;
1072 cp
->kwq_con_idx
= *cp
->kwq_con_idx_ptr
;
1073 hw_prod
= sblk
->status_completion_producer_index
;
1079 CNIC_WR16(dev
, cp
->kcq_io_addr
, sw_prod
);
1081 cp
->kcq_prod_idx
= sw_prod
;
1083 cnic_chk_pkt_rings(cp
);
1087 static void cnic_service_bnx2_msix(unsigned long data
)
1089 struct cnic_dev
*dev
= (struct cnic_dev
*) data
;
1090 struct cnic_local
*cp
= dev
->cnic_priv
;
1091 struct status_block_msix
*status_blk
= cp
->bnx2_status_blk
;
1092 u32 status_idx
= status_blk
->status_idx
;
1093 u16 hw_prod
, sw_prod
;
1096 cp
->kwq_con_idx
= status_blk
->status_cmd_consumer_index
;
1098 hw_prod
= status_blk
->status_completion_producer_index
;
1099 sw_prod
= cp
->kcq_prod_idx
;
1100 while (sw_prod
!= hw_prod
) {
1101 kcqe_cnt
= cnic_get_kcqes(dev
, hw_prod
, &sw_prod
);
1105 service_kcqes(dev
, kcqe_cnt
);
1107 /* Tell compiler that status_blk fields can change. */
1109 if (status_idx
!= status_blk
->status_idx
) {
1110 status_idx
= status_blk
->status_idx
;
1111 cp
->kwq_con_idx
= status_blk
->status_cmd_consumer_index
;
1112 hw_prod
= status_blk
->status_completion_producer_index
;
1118 CNIC_WR16(dev
, cp
->kcq_io_addr
, sw_prod
);
1119 cp
->kcq_prod_idx
= sw_prod
;
1121 cnic_chk_pkt_rings(cp
);
1123 cp
->last_status_idx
= status_idx
;
1124 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
1125 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| cp
->last_status_idx
);
1128 static irqreturn_t
cnic_irq(int irq
, void *dev_instance
)
1130 struct cnic_dev
*dev
= dev_instance
;
1131 struct cnic_local
*cp
= dev
->cnic_priv
;
1132 u16 prod
= cp
->kcq_prod_idx
& MAX_KCQ_IDX
;
1137 prefetch(cp
->status_blk
);
1138 prefetch(&cp
->kcq
[KCQ_PG(prod
)][KCQ_IDX(prod
)]);
1140 if (likely(test_bit(CNIC_F_CNIC_UP
, &dev
->flags
)))
1141 tasklet_schedule(&cp
->cnic_irq_task
);
1146 static void cnic_ulp_stop(struct cnic_dev
*dev
)
1148 struct cnic_local
*cp
= dev
->cnic_priv
;
1152 cnic_send_nlmsg(cp
, ISCSI_KEVENT_IF_DOWN
, NULL
);
1154 for (if_type
= 0; if_type
< MAX_CNIC_ULP_TYPE
; if_type
++) {
1155 struct cnic_ulp_ops
*ulp_ops
;
1157 mutex_lock(&cnic_lock
);
1158 ulp_ops
= cp
->ulp_ops
[if_type
];
1160 mutex_unlock(&cnic_lock
);
1163 set_bit(ULP_F_CALL_PENDING
, &cp
->ulp_flags
[if_type
]);
1164 mutex_unlock(&cnic_lock
);
1166 if (test_and_clear_bit(ULP_F_START
, &cp
->ulp_flags
[if_type
]))
1167 ulp_ops
->cnic_stop(cp
->ulp_handle
[if_type
]);
1169 clear_bit(ULP_F_CALL_PENDING
, &cp
->ulp_flags
[if_type
]);
1173 static void cnic_ulp_start(struct cnic_dev
*dev
)
1175 struct cnic_local
*cp
= dev
->cnic_priv
;
1178 for (if_type
= 0; if_type
< MAX_CNIC_ULP_TYPE
; if_type
++) {
1179 struct cnic_ulp_ops
*ulp_ops
;
1181 mutex_lock(&cnic_lock
);
1182 ulp_ops
= cp
->ulp_ops
[if_type
];
1183 if (!ulp_ops
|| !ulp_ops
->cnic_start
) {
1184 mutex_unlock(&cnic_lock
);
1187 set_bit(ULP_F_CALL_PENDING
, &cp
->ulp_flags
[if_type
]);
1188 mutex_unlock(&cnic_lock
);
1190 if (!test_and_set_bit(ULP_F_START
, &cp
->ulp_flags
[if_type
]))
1191 ulp_ops
->cnic_start(cp
->ulp_handle
[if_type
]);
1193 clear_bit(ULP_F_CALL_PENDING
, &cp
->ulp_flags
[if_type
]);
1197 static int cnic_ctl(void *data
, struct cnic_ctl_info
*info
)
1199 struct cnic_dev
*dev
= data
;
1201 switch (info
->cmd
) {
1202 case CNIC_CTL_STOP_CMD
:
1210 case CNIC_CTL_START_CMD
:
1213 if (!cnic_start_hw(dev
))
1214 cnic_ulp_start(dev
);
1224 static void cnic_ulp_init(struct cnic_dev
*dev
)
1227 struct cnic_local
*cp
= dev
->cnic_priv
;
1229 for (i
= 0; i
< MAX_CNIC_ULP_TYPE_EXT
; i
++) {
1230 struct cnic_ulp_ops
*ulp_ops
;
1232 mutex_lock(&cnic_lock
);
1233 ulp_ops
= cnic_ulp_tbl
[i
];
1234 if (!ulp_ops
|| !ulp_ops
->cnic_init
) {
1235 mutex_unlock(&cnic_lock
);
1239 mutex_unlock(&cnic_lock
);
1241 if (!test_and_set_bit(ULP_F_INIT
, &cp
->ulp_flags
[i
]))
1242 ulp_ops
->cnic_init(dev
);
1248 static void cnic_ulp_exit(struct cnic_dev
*dev
)
1251 struct cnic_local
*cp
= dev
->cnic_priv
;
1253 for (i
= 0; i
< MAX_CNIC_ULP_TYPE_EXT
; i
++) {
1254 struct cnic_ulp_ops
*ulp_ops
;
1256 mutex_lock(&cnic_lock
);
1257 ulp_ops
= cnic_ulp_tbl
[i
];
1258 if (!ulp_ops
|| !ulp_ops
->cnic_exit
) {
1259 mutex_unlock(&cnic_lock
);
1263 mutex_unlock(&cnic_lock
);
1265 if (test_and_clear_bit(ULP_F_INIT
, &cp
->ulp_flags
[i
]))
1266 ulp_ops
->cnic_exit(dev
);
1272 static int cnic_cm_offload_pg(struct cnic_sock
*csk
)
1274 struct cnic_dev
*dev
= csk
->dev
;
1275 struct l4_kwq_offload_pg
*l4kwqe
;
1276 struct kwqe
*wqes
[1];
1278 l4kwqe
= (struct l4_kwq_offload_pg
*) &csk
->kwqe1
;
1279 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1280 wqes
[0] = (struct kwqe
*) l4kwqe
;
1282 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_OFFLOAD_PG
;
1284 L4_LAYER_CODE
<< L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT
;
1285 l4kwqe
->l2hdr_nbytes
= ETH_HLEN
;
1287 l4kwqe
->da0
= csk
->ha
[0];
1288 l4kwqe
->da1
= csk
->ha
[1];
1289 l4kwqe
->da2
= csk
->ha
[2];
1290 l4kwqe
->da3
= csk
->ha
[3];
1291 l4kwqe
->da4
= csk
->ha
[4];
1292 l4kwqe
->da5
= csk
->ha
[5];
1294 l4kwqe
->sa0
= dev
->mac_addr
[0];
1295 l4kwqe
->sa1
= dev
->mac_addr
[1];
1296 l4kwqe
->sa2
= dev
->mac_addr
[2];
1297 l4kwqe
->sa3
= dev
->mac_addr
[3];
1298 l4kwqe
->sa4
= dev
->mac_addr
[4];
1299 l4kwqe
->sa5
= dev
->mac_addr
[5];
1301 l4kwqe
->etype
= ETH_P_IP
;
1302 l4kwqe
->ipid_count
= DEF_IPID_COUNT
;
1303 l4kwqe
->host_opaque
= csk
->l5_cid
;
1306 l4kwqe
->pg_flags
|= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING
;
1307 l4kwqe
->vlan_tag
= csk
->vlan_id
;
1308 l4kwqe
->l2hdr_nbytes
+= 4;
1311 return dev
->submit_kwqes(dev
, wqes
, 1);
1314 static int cnic_cm_update_pg(struct cnic_sock
*csk
)
1316 struct cnic_dev
*dev
= csk
->dev
;
1317 struct l4_kwq_update_pg
*l4kwqe
;
1318 struct kwqe
*wqes
[1];
1320 l4kwqe
= (struct l4_kwq_update_pg
*) &csk
->kwqe1
;
1321 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1322 wqes
[0] = (struct kwqe
*) l4kwqe
;
1324 l4kwqe
->opcode
= L4_KWQE_OPCODE_VALUE_UPDATE_PG
;
1326 L4_LAYER_CODE
<< L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT
;
1327 l4kwqe
->pg_cid
= csk
->pg_cid
;
1329 l4kwqe
->da0
= csk
->ha
[0];
1330 l4kwqe
->da1
= csk
->ha
[1];
1331 l4kwqe
->da2
= csk
->ha
[2];
1332 l4kwqe
->da3
= csk
->ha
[3];
1333 l4kwqe
->da4
= csk
->ha
[4];
1334 l4kwqe
->da5
= csk
->ha
[5];
1336 l4kwqe
->pg_host_opaque
= csk
->l5_cid
;
1337 l4kwqe
->pg_valids
= L4_KWQ_UPDATE_PG_VALIDS_DA
;
1339 return dev
->submit_kwqes(dev
, wqes
, 1);
1342 static int cnic_cm_upload_pg(struct cnic_sock
*csk
)
1344 struct cnic_dev
*dev
= csk
->dev
;
1345 struct l4_kwq_upload
*l4kwqe
;
1346 struct kwqe
*wqes
[1];
1348 l4kwqe
= (struct l4_kwq_upload
*) &csk
->kwqe1
;
1349 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1350 wqes
[0] = (struct kwqe
*) l4kwqe
;
1352 l4kwqe
->opcode
= L4_KWQE_OPCODE_VALUE_UPLOAD_PG
;
1354 L4_LAYER_CODE
<< L4_KWQ_UPLOAD_LAYER_CODE_SHIFT
;
1355 l4kwqe
->cid
= csk
->pg_cid
;
1357 return dev
->submit_kwqes(dev
, wqes
, 1);
1360 static int cnic_cm_conn_req(struct cnic_sock
*csk
)
1362 struct cnic_dev
*dev
= csk
->dev
;
1363 struct l4_kwq_connect_req1
*l4kwqe1
;
1364 struct l4_kwq_connect_req2
*l4kwqe2
;
1365 struct l4_kwq_connect_req3
*l4kwqe3
;
1366 struct kwqe
*wqes
[3];
1370 l4kwqe1
= (struct l4_kwq_connect_req1
*) &csk
->kwqe1
;
1371 l4kwqe2
= (struct l4_kwq_connect_req2
*) &csk
->kwqe2
;
1372 l4kwqe3
= (struct l4_kwq_connect_req3
*) &csk
->kwqe3
;
1373 memset(l4kwqe1
, 0, sizeof(*l4kwqe1
));
1374 memset(l4kwqe2
, 0, sizeof(*l4kwqe2
));
1375 memset(l4kwqe3
, 0, sizeof(*l4kwqe3
));
1377 l4kwqe3
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT3
;
1379 L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT
;
1380 l4kwqe3
->ka_timeout
= csk
->ka_timeout
;
1381 l4kwqe3
->ka_interval
= csk
->ka_interval
;
1382 l4kwqe3
->ka_max_probe_count
= csk
->ka_max_probe_count
;
1383 l4kwqe3
->tos
= csk
->tos
;
1384 l4kwqe3
->ttl
= csk
->ttl
;
1385 l4kwqe3
->snd_seq_scale
= csk
->snd_seq_scale
;
1386 l4kwqe3
->pmtu
= csk
->mtu
;
1387 l4kwqe3
->rcv_buf
= csk
->rcv_buf
;
1388 l4kwqe3
->snd_buf
= csk
->snd_buf
;
1389 l4kwqe3
->seed
= csk
->seed
;
1391 wqes
[0] = (struct kwqe
*) l4kwqe1
;
1392 if (test_bit(SK_F_IPV6
, &csk
->flags
)) {
1393 wqes
[1] = (struct kwqe
*) l4kwqe2
;
1394 wqes
[2] = (struct kwqe
*) l4kwqe3
;
1397 l4kwqe1
->conn_flags
= L4_KWQ_CONNECT_REQ1_IP_V6
;
1398 l4kwqe2
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT2
;
1400 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT
|
1401 L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT
;
1402 l4kwqe2
->src_ip_v6_2
= be32_to_cpu(csk
->src_ip
[1]);
1403 l4kwqe2
->src_ip_v6_3
= be32_to_cpu(csk
->src_ip
[2]);
1404 l4kwqe2
->src_ip_v6_4
= be32_to_cpu(csk
->src_ip
[3]);
1405 l4kwqe2
->dst_ip_v6_2
= be32_to_cpu(csk
->dst_ip
[1]);
1406 l4kwqe2
->dst_ip_v6_3
= be32_to_cpu(csk
->dst_ip
[2]);
1407 l4kwqe2
->dst_ip_v6_4
= be32_to_cpu(csk
->dst_ip
[3]);
1408 l4kwqe3
->mss
= l4kwqe3
->pmtu
- sizeof(struct ipv6hdr
) -
1409 sizeof(struct tcphdr
);
1411 wqes
[1] = (struct kwqe
*) l4kwqe3
;
1412 l4kwqe3
->mss
= l4kwqe3
->pmtu
- sizeof(struct iphdr
) -
1413 sizeof(struct tcphdr
);
1416 l4kwqe1
->op_code
= L4_KWQE_OPCODE_VALUE_CONNECT1
;
1418 (L4_LAYER_CODE
<< L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT
) |
1419 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT
;
1420 l4kwqe1
->cid
= csk
->cid
;
1421 l4kwqe1
->pg_cid
= csk
->pg_cid
;
1422 l4kwqe1
->src_ip
= be32_to_cpu(csk
->src_ip
[0]);
1423 l4kwqe1
->dst_ip
= be32_to_cpu(csk
->dst_ip
[0]);
1424 l4kwqe1
->src_port
= be16_to_cpu(csk
->src_port
);
1425 l4kwqe1
->dst_port
= be16_to_cpu(csk
->dst_port
);
1426 if (csk
->tcp_flags
& SK_TCP_NO_DELAY_ACK
)
1427 tcp_flags
|= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK
;
1428 if (csk
->tcp_flags
& SK_TCP_KEEP_ALIVE
)
1429 tcp_flags
|= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE
;
1430 if (csk
->tcp_flags
& SK_TCP_NAGLE
)
1431 tcp_flags
|= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE
;
1432 if (csk
->tcp_flags
& SK_TCP_TIMESTAMP
)
1433 tcp_flags
|= L4_KWQ_CONNECT_REQ1_TIME_STAMP
;
1434 if (csk
->tcp_flags
& SK_TCP_SACK
)
1435 tcp_flags
|= L4_KWQ_CONNECT_REQ1_SACK
;
1436 if (csk
->tcp_flags
& SK_TCP_SEG_SCALING
)
1437 tcp_flags
|= L4_KWQ_CONNECT_REQ1_SEG_SCALING
;
1439 l4kwqe1
->tcp_flags
= tcp_flags
;
1441 return dev
->submit_kwqes(dev
, wqes
, num_wqes
);
1444 static int cnic_cm_close_req(struct cnic_sock
*csk
)
1446 struct cnic_dev
*dev
= csk
->dev
;
1447 struct l4_kwq_close_req
*l4kwqe
;
1448 struct kwqe
*wqes
[1];
1450 l4kwqe
= (struct l4_kwq_close_req
*) &csk
->kwqe2
;
1451 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1452 wqes
[0] = (struct kwqe
*) l4kwqe
;
1454 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_CLOSE
;
1455 l4kwqe
->flags
= L4_LAYER_CODE
<< L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT
;
1456 l4kwqe
->cid
= csk
->cid
;
1458 return dev
->submit_kwqes(dev
, wqes
, 1);
1461 static int cnic_cm_abort_req(struct cnic_sock
*csk
)
1463 struct cnic_dev
*dev
= csk
->dev
;
1464 struct l4_kwq_reset_req
*l4kwqe
;
1465 struct kwqe
*wqes
[1];
1467 l4kwqe
= (struct l4_kwq_reset_req
*) &csk
->kwqe2
;
1468 memset(l4kwqe
, 0, sizeof(*l4kwqe
));
1469 wqes
[0] = (struct kwqe
*) l4kwqe
;
1471 l4kwqe
->op_code
= L4_KWQE_OPCODE_VALUE_RESET
;
1472 l4kwqe
->flags
= L4_LAYER_CODE
<< L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT
;
1473 l4kwqe
->cid
= csk
->cid
;
1475 return dev
->submit_kwqes(dev
, wqes
, 1);
1478 static int cnic_cm_create(struct cnic_dev
*dev
, int ulp_type
, u32 cid
,
1479 u32 l5_cid
, struct cnic_sock
**csk
, void *context
)
1481 struct cnic_local
*cp
= dev
->cnic_priv
;
1482 struct cnic_sock
*csk1
;
1484 if (l5_cid
>= MAX_CM_SK_TBL_SZ
)
1487 csk1
= &cp
->csk_tbl
[l5_cid
];
1488 if (atomic_read(&csk1
->ref_count
))
1491 if (test_and_set_bit(SK_F_INUSE
, &csk1
->flags
))
1496 csk1
->l5_cid
= l5_cid
;
1497 csk1
->ulp_type
= ulp_type
;
1498 csk1
->context
= context
;
1500 csk1
->ka_timeout
= DEF_KA_TIMEOUT
;
1501 csk1
->ka_interval
= DEF_KA_INTERVAL
;
1502 csk1
->ka_max_probe_count
= DEF_KA_MAX_PROBE_COUNT
;
1503 csk1
->tos
= DEF_TOS
;
1504 csk1
->ttl
= DEF_TTL
;
1505 csk1
->snd_seq_scale
= DEF_SND_SEQ_SCALE
;
1506 csk1
->rcv_buf
= DEF_RCV_BUF
;
1507 csk1
->snd_buf
= DEF_SND_BUF
;
1508 csk1
->seed
= DEF_SEED
;
1514 static void cnic_cm_cleanup(struct cnic_sock
*csk
)
1516 if (csk
->src_port
) {
1517 struct cnic_dev
*dev
= csk
->dev
;
1518 struct cnic_local
*cp
= dev
->cnic_priv
;
1520 cnic_free_id(&cp
->csk_port_tbl
, csk
->src_port
);
1525 static void cnic_close_conn(struct cnic_sock
*csk
)
1527 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
)) {
1528 cnic_cm_upload_pg(csk
);
1529 clear_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
);
1531 cnic_cm_cleanup(csk
);
1534 static int cnic_cm_destroy(struct cnic_sock
*csk
)
1536 if (!cnic_in_use(csk
))
1540 clear_bit(SK_F_INUSE
, &csk
->flags
);
1541 smp_mb__after_clear_bit();
1542 while (atomic_read(&csk
->ref_count
) != 1)
1544 cnic_cm_cleanup(csk
);
1551 static inline u16
cnic_get_vlan(struct net_device
*dev
,
1552 struct net_device
**vlan_dev
)
1554 if (dev
->priv_flags
& IFF_802_1Q_VLAN
) {
1555 *vlan_dev
= vlan_dev_real_dev(dev
);
1556 return vlan_dev_vlan_id(dev
);
1562 static int cnic_get_v4_route(struct sockaddr_in
*dst_addr
,
1563 struct dst_entry
**dst
)
1565 #if defined(CONFIG_INET)
1570 memset(&fl
, 0, sizeof(fl
));
1571 fl
.nl_u
.ip4_u
.daddr
= dst_addr
->sin_addr
.s_addr
;
1573 err
= ip_route_output_key(&init_net
, &rt
, &fl
);
1578 return -ENETUNREACH
;
1582 static int cnic_get_v6_route(struct sockaddr_in6
*dst_addr
,
1583 struct dst_entry
**dst
)
1585 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1588 memset(&fl
, 0, sizeof(fl
));
1589 ipv6_addr_copy(&fl
.fl6_dst
, &dst_addr
->sin6_addr
);
1590 if (ipv6_addr_type(&fl
.fl6_dst
) & IPV6_ADDR_LINKLOCAL
)
1591 fl
.oif
= dst_addr
->sin6_scope_id
;
1593 *dst
= ip6_route_output(&init_net
, NULL
, &fl
);
1598 return -ENETUNREACH
;
1601 static struct cnic_dev
*cnic_cm_select_dev(struct sockaddr_in
*dst_addr
,
1604 struct cnic_dev
*dev
= NULL
;
1605 struct dst_entry
*dst
;
1606 struct net_device
*netdev
= NULL
;
1607 int err
= -ENETUNREACH
;
1609 if (dst_addr
->sin_family
== AF_INET
)
1610 err
= cnic_get_v4_route(dst_addr
, &dst
);
1611 else if (dst_addr
->sin_family
== AF_INET6
) {
1612 struct sockaddr_in6
*dst_addr6
=
1613 (struct sockaddr_in6
*) dst_addr
;
1615 err
= cnic_get_v6_route(dst_addr6
, &dst
);
1625 cnic_get_vlan(dst
->dev
, &netdev
);
1627 dev
= cnic_from_netdev(netdev
);
1636 static int cnic_resolve_addr(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1638 struct cnic_dev
*dev
= csk
->dev
;
1639 struct cnic_local
*cp
= dev
->cnic_priv
;
1641 return cnic_send_nlmsg(cp
, ISCSI_KEVENT_PATH_REQ
, csk
);
1644 static int cnic_get_route(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1646 struct cnic_dev
*dev
= csk
->dev
;
1647 struct cnic_local
*cp
= dev
->cnic_priv
;
1648 int is_v6
, err
, rc
= -ENETUNREACH
;
1649 struct dst_entry
*dst
;
1650 struct net_device
*realdev
;
1653 if (saddr
->local
.v6
.sin6_family
== AF_INET6
&&
1654 saddr
->remote
.v6
.sin6_family
== AF_INET6
)
1656 else if (saddr
->local
.v4
.sin_family
== AF_INET
&&
1657 saddr
->remote
.v4
.sin_family
== AF_INET
)
1662 clear_bit(SK_F_IPV6
, &csk
->flags
);
1665 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1666 set_bit(SK_F_IPV6
, &csk
->flags
);
1667 err
= cnic_get_v6_route(&saddr
->remote
.v6
, &dst
);
1671 if (!dst
|| dst
->error
|| !dst
->dev
)
1674 memcpy(&csk
->dst_ip
[0], &saddr
->remote
.v6
.sin6_addr
,
1675 sizeof(struct in6_addr
));
1676 csk
->dst_port
= saddr
->remote
.v6
.sin6_port
;
1677 local_port
= saddr
->local
.v6
.sin6_port
;
1683 err
= cnic_get_v4_route(&saddr
->remote
.v4
, &dst
);
1687 if (!dst
|| dst
->error
|| !dst
->dev
)
1690 csk
->dst_ip
[0] = saddr
->remote
.v4
.sin_addr
.s_addr
;
1691 csk
->dst_port
= saddr
->remote
.v4
.sin_port
;
1692 local_port
= saddr
->local
.v4
.sin_port
;
1695 csk
->vlan_id
= cnic_get_vlan(dst
->dev
, &realdev
);
1696 if (realdev
!= dev
->netdev
)
1699 if (local_port
>= CNIC_LOCAL_PORT_MIN
&&
1700 local_port
< CNIC_LOCAL_PORT_MAX
) {
1701 if (cnic_alloc_id(&cp
->csk_port_tbl
, local_port
))
1707 local_port
= cnic_alloc_new_id(&cp
->csk_port_tbl
);
1708 if (local_port
== -1) {
1713 csk
->src_port
= local_port
;
1715 csk
->mtu
= dst_mtu(dst
);
1723 static void cnic_init_csk_state(struct cnic_sock
*csk
)
1726 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1727 clear_bit(SK_F_CLOSING
, &csk
->flags
);
1730 static int cnic_cm_connect(struct cnic_sock
*csk
, struct cnic_sockaddr
*saddr
)
1734 if (!cnic_in_use(csk
))
1737 if (test_and_set_bit(SK_F_CONNECT_START
, &csk
->flags
))
1740 cnic_init_csk_state(csk
);
1742 err
= cnic_get_route(csk
, saddr
);
1746 err
= cnic_resolve_addr(csk
, saddr
);
1751 clear_bit(SK_F_CONNECT_START
, &csk
->flags
);
1755 static int cnic_cm_abort(struct cnic_sock
*csk
)
1757 struct cnic_local
*cp
= csk
->dev
->cnic_priv
;
1760 if (!cnic_in_use(csk
))
1763 if (cnic_abort_prep(csk
))
1764 return cnic_cm_abort_req(csk
);
1766 /* Getting here means that we haven't started connect, or
1767 * connect was not successful.
1770 csk
->state
= L4_KCQE_OPCODE_VALUE_RESET_COMP
;
1771 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
1772 opcode
= csk
->state
;
1774 opcode
= L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD
;
1775 cp
->close_conn(csk
, opcode
);
1780 static int cnic_cm_close(struct cnic_sock
*csk
)
1782 if (!cnic_in_use(csk
))
1785 if (cnic_close_prep(csk
)) {
1786 csk
->state
= L4_KCQE_OPCODE_VALUE_CLOSE_COMP
;
1787 return cnic_cm_close_req(csk
);
1792 static void cnic_cm_upcall(struct cnic_local
*cp
, struct cnic_sock
*csk
,
1795 struct cnic_ulp_ops
*ulp_ops
;
1796 int ulp_type
= csk
->ulp_type
;
1799 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
1801 if (opcode
== L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
)
1802 ulp_ops
->cm_connect_complete(csk
);
1803 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)
1804 ulp_ops
->cm_close_complete(csk
);
1805 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
)
1806 ulp_ops
->cm_remote_abort(csk
);
1807 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_COMP
)
1808 ulp_ops
->cm_abort_complete(csk
);
1809 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED
)
1810 ulp_ops
->cm_remote_close(csk
);
1815 static int cnic_cm_set_pg(struct cnic_sock
*csk
)
1817 if (cnic_offld_prep(csk
)) {
1818 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
1819 cnic_cm_update_pg(csk
);
1821 cnic_cm_offload_pg(csk
);
1826 static void cnic_cm_process_offld_pg(struct cnic_dev
*dev
, struct l4_kcq
*kcqe
)
1828 struct cnic_local
*cp
= dev
->cnic_priv
;
1829 u32 l5_cid
= kcqe
->pg_host_opaque
;
1830 u8 opcode
= kcqe
->op_code
;
1831 struct cnic_sock
*csk
= &cp
->csk_tbl
[l5_cid
];
1834 if (!cnic_in_use(csk
))
1837 if (opcode
== L4_KCQE_OPCODE_VALUE_UPDATE_PG
) {
1838 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1841 csk
->pg_cid
= kcqe
->pg_cid
;
1842 set_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
);
1843 cnic_cm_conn_req(csk
);
1849 static void cnic_cm_process_kcqe(struct cnic_dev
*dev
, struct kcqe
*kcqe
)
1851 struct cnic_local
*cp
= dev
->cnic_priv
;
1852 struct l4_kcq
*l4kcqe
= (struct l4_kcq
*) kcqe
;
1853 u8 opcode
= l4kcqe
->op_code
;
1855 struct cnic_sock
*csk
;
1857 if (opcode
== L4_KCQE_OPCODE_VALUE_OFFLOAD_PG
||
1858 opcode
== L4_KCQE_OPCODE_VALUE_UPDATE_PG
) {
1859 cnic_cm_process_offld_pg(dev
, l4kcqe
);
1863 l5_cid
= l4kcqe
->conn_id
;
1865 l5_cid
= l4kcqe
->cid
;
1866 if (l5_cid
>= MAX_CM_SK_TBL_SZ
)
1869 csk
= &cp
->csk_tbl
[l5_cid
];
1872 if (!cnic_in_use(csk
)) {
1878 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
:
1879 if (l4kcqe
->status
== 0)
1880 set_bit(SK_F_OFFLD_COMPLETE
, &csk
->flags
);
1882 smp_mb__before_clear_bit();
1883 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
1884 cnic_cm_upcall(cp
, csk
, opcode
);
1887 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
:
1888 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE
, &csk
->flags
))
1889 csk
->state
= opcode
;
1891 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP
:
1892 case L4_KCQE_OPCODE_VALUE_RESET_COMP
:
1893 cp
->close_conn(csk
, opcode
);
1896 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED
:
1897 cnic_cm_upcall(cp
, csk
, opcode
);
1903 static void cnic_cm_indicate_kcqe(void *data
, struct kcqe
*kcqe
[], u32 num
)
1905 struct cnic_dev
*dev
= data
;
1908 for (i
= 0; i
< num
; i
++)
1909 cnic_cm_process_kcqe(dev
, kcqe
[i
]);
1912 static struct cnic_ulp_ops cm_ulp_ops
= {
1913 .indicate_kcqes
= cnic_cm_indicate_kcqe
,
1916 static void cnic_cm_free_mem(struct cnic_dev
*dev
)
1918 struct cnic_local
*cp
= dev
->cnic_priv
;
1922 cnic_free_id_tbl(&cp
->csk_port_tbl
);
1925 static int cnic_cm_alloc_mem(struct cnic_dev
*dev
)
1927 struct cnic_local
*cp
= dev
->cnic_priv
;
1929 cp
->csk_tbl
= kzalloc(sizeof(struct cnic_sock
) * MAX_CM_SK_TBL_SZ
,
1934 if (cnic_init_id_tbl(&cp
->csk_port_tbl
, CNIC_LOCAL_PORT_RANGE
,
1935 CNIC_LOCAL_PORT_MIN
)) {
1936 cnic_cm_free_mem(dev
);
1942 static int cnic_ready_to_close(struct cnic_sock
*csk
, u32 opcode
)
1944 if ((opcode
== csk
->state
) ||
1945 (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
&&
1946 csk
->state
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)) {
1947 if (!test_and_set_bit(SK_F_CLOSING
, &csk
->flags
))
1953 static void cnic_close_bnx2_conn(struct cnic_sock
*csk
, u32 opcode
)
1955 struct cnic_dev
*dev
= csk
->dev
;
1956 struct cnic_local
*cp
= dev
->cnic_priv
;
1958 clear_bit(SK_F_CONNECT_START
, &csk
->flags
);
1959 if (cnic_ready_to_close(csk
, opcode
)) {
1960 cnic_close_conn(csk
);
1961 cnic_cm_upcall(cp
, csk
, opcode
);
1965 static void cnic_cm_stop_bnx2_hw(struct cnic_dev
*dev
)
1969 static int cnic_cm_init_bnx2_hw(struct cnic_dev
*dev
)
1973 get_random_bytes(&seed
, 4);
1974 cnic_ctx_wr(dev
, 45, 0, seed
);
1978 static int cnic_cm_open(struct cnic_dev
*dev
)
1980 struct cnic_local
*cp
= dev
->cnic_priv
;
1983 err
= cnic_cm_alloc_mem(dev
);
1987 err
= cp
->start_cm(dev
);
1992 dev
->cm_create
= cnic_cm_create
;
1993 dev
->cm_destroy
= cnic_cm_destroy
;
1994 dev
->cm_connect
= cnic_cm_connect
;
1995 dev
->cm_abort
= cnic_cm_abort
;
1996 dev
->cm_close
= cnic_cm_close
;
1997 dev
->cm_select_dev
= cnic_cm_select_dev
;
1999 cp
->ulp_handle
[CNIC_ULP_L4
] = dev
;
2000 rcu_assign_pointer(cp
->ulp_ops
[CNIC_ULP_L4
], &cm_ulp_ops
);
2004 cnic_cm_free_mem(dev
);
2008 static int cnic_cm_shutdown(struct cnic_dev
*dev
)
2010 struct cnic_local
*cp
= dev
->cnic_priv
;
2018 for (i
= 0; i
< MAX_CM_SK_TBL_SZ
; i
++) {
2019 struct cnic_sock
*csk
= &cp
->csk_tbl
[i
];
2021 clear_bit(SK_F_INUSE
, &csk
->flags
);
2022 cnic_cm_cleanup(csk
);
2024 cnic_cm_free_mem(dev
);
2029 static void cnic_init_context(struct cnic_dev
*dev
, u32 cid
)
2031 struct cnic_local
*cp
= dev
->cnic_priv
;
2035 if (CHIP_NUM(cp
) == CHIP_NUM_5709
)
2038 cid_addr
= GET_CID_ADDR(cid
);
2040 for (i
= 0; i
< CTX_SIZE
; i
+= 4)
2041 cnic_ctx_wr(dev
, cid_addr
, i
, 0);
2044 static int cnic_setup_5709_context(struct cnic_dev
*dev
, int valid
)
2046 struct cnic_local
*cp
= dev
->cnic_priv
;
2048 u32 valid_bit
= valid
? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID
: 0;
2050 if (CHIP_NUM(cp
) != CHIP_NUM_5709
)
2053 for (i
= 0; i
< cp
->ctx_blks
; i
++) {
2055 u32 idx
= cp
->ctx_arr
[i
].cid
/ cp
->cids_per_blk
;
2058 memset(cp
->ctx_arr
[i
].ctx
, 0, BCM_PAGE_SIZE
);
2060 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_DATA0
,
2061 (cp
->ctx_arr
[i
].mapping
& 0xffffffff) | valid_bit
);
2062 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_DATA1
,
2063 (u64
) cp
->ctx_arr
[i
].mapping
>> 32);
2064 CNIC_WR(dev
, BNX2_CTX_HOST_PAGE_TBL_CTRL
, idx
|
2065 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
);
2066 for (j
= 0; j
< 10; j
++) {
2068 val
= CNIC_RD(dev
, BNX2_CTX_HOST_PAGE_TBL_CTRL
);
2069 if (!(val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
))
2073 if (val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
) {
2081 static void cnic_free_irq(struct cnic_dev
*dev
)
2083 struct cnic_local
*cp
= dev
->cnic_priv
;
2084 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2086 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2087 cp
->disable_int_sync(dev
);
2088 tasklet_disable(&cp
->cnic_irq_task
);
2089 free_irq(ethdev
->irq_arr
[0].vector
, dev
);
2093 static int cnic_init_bnx2_irq(struct cnic_dev
*dev
)
2095 struct cnic_local
*cp
= dev
->cnic_priv
;
2096 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2098 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2100 int sblk_num
= cp
->status_blk_num
;
2101 u32 base
= ((sblk_num
- 1) * BNX2_HC_SB_CONFIG_SIZE
) +
2102 BNX2_HC_SB_CONFIG_1
;
2104 CNIC_WR(dev
, base
, BNX2_HC_SB_CONFIG_1_ONE_SHOT
);
2106 CNIC_WR(dev
, base
+ BNX2_HC_COMP_PROD_TRIP_OFF
, (2 << 16) | 8);
2107 CNIC_WR(dev
, base
+ BNX2_HC_COM_TICKS_OFF
, (64 << 16) | 220);
2108 CNIC_WR(dev
, base
+ BNX2_HC_CMD_TICKS_OFF
, (64 << 16) | 220);
2110 cp
->bnx2_status_blk
= cp
->status_blk
;
2111 cp
->last_status_idx
= cp
->bnx2_status_blk
->status_idx
;
2112 tasklet_init(&cp
->cnic_irq_task
, &cnic_service_bnx2_msix
,
2113 (unsigned long) dev
);
2114 err
= request_irq(ethdev
->irq_arr
[0].vector
, cnic_irq
, 0,
2117 tasklet_disable(&cp
->cnic_irq_task
);
2120 while (cp
->bnx2_status_blk
->status_completion_producer_index
&&
2122 CNIC_WR(dev
, BNX2_HC_COALESCE_NOW
,
2123 1 << (11 + sblk_num
));
2128 if (cp
->bnx2_status_blk
->status_completion_producer_index
) {
2134 struct status_block
*sblk
= cp
->status_blk
;
2135 u32 hc_cmd
= CNIC_RD(dev
, BNX2_HC_COMMAND
);
2138 while (sblk
->status_completion_producer_index
&& i
< 10) {
2139 CNIC_WR(dev
, BNX2_HC_COMMAND
,
2140 hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
2145 if (sblk
->status_completion_producer_index
)
2152 printk(KERN_ERR PFX
"%s: " "KCQ index not resetting to 0.\n",
2157 static void cnic_enable_bnx2_int(struct cnic_dev
*dev
)
2159 struct cnic_local
*cp
= dev
->cnic_priv
;
2160 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2162 if (!(ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
))
2165 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
2166 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| cp
->last_status_idx
);
2169 static void cnic_disable_bnx2_int_sync(struct cnic_dev
*dev
)
2171 struct cnic_local
*cp
= dev
->cnic_priv
;
2172 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2174 if (!(ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
))
2177 CNIC_WR(dev
, BNX2_PCICFG_INT_ACK_CMD
, cp
->int_num
|
2178 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
2179 CNIC_RD(dev
, BNX2_PCICFG_INT_ACK_CMD
);
2180 synchronize_irq(ethdev
->irq_arr
[0].vector
);
2183 static void cnic_init_bnx2_tx_ring(struct cnic_dev
*dev
)
2185 struct cnic_local
*cp
= dev
->cnic_priv
;
2186 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2187 u32 cid_addr
, tx_cid
, sb_id
;
2188 u32 val
, offset0
, offset1
, offset2
, offset3
;
2192 struct status_block
*s_blk
= cp
->status_blk
;
2194 sb_id
= cp
->status_blk_num
;
2196 cnic_init_context(dev
, tx_cid
);
2197 cnic_init_context(dev
, tx_cid
+ 1);
2198 cp
->tx_cons_ptr
= &s_blk
->status_tx_quick_consumer_index2
;
2199 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2200 struct status_block_msix
*sblk
= cp
->status_blk
;
2202 tx_cid
= TX_TSS_CID
+ sb_id
- 1;
2203 cnic_init_context(dev
, tx_cid
);
2204 CNIC_WR(dev
, BNX2_TSCH_TSS_CFG
, (sb_id
<< 24) |
2206 cp
->tx_cons_ptr
= &sblk
->status_tx_quick_consumer_index
;
2208 cp
->tx_cons
= *cp
->tx_cons_ptr
;
2210 cid_addr
= GET_CID_ADDR(tx_cid
);
2211 if (CHIP_NUM(cp
) == CHIP_NUM_5709
) {
2212 u32 cid_addr2
= GET_CID_ADDR(tx_cid
+ 4) + 0x40;
2214 for (i
= 0; i
< PHY_CTX_SIZE
; i
+= 4)
2215 cnic_ctx_wr(dev
, cid_addr2
, i
, 0);
2217 offset0
= BNX2_L2CTX_TYPE_XI
;
2218 offset1
= BNX2_L2CTX_CMD_TYPE_XI
;
2219 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI_XI
;
2220 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO_XI
;
2222 offset0
= BNX2_L2CTX_TYPE
;
2223 offset1
= BNX2_L2CTX_CMD_TYPE
;
2224 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI
;
2225 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO
;
2227 val
= BNX2_L2CTX_TYPE_TYPE_L2
| BNX2_L2CTX_TYPE_SIZE_L2
;
2228 cnic_ctx_wr(dev
, cid_addr
, offset0
, val
);
2230 val
= BNX2_L2CTX_CMD_TYPE_TYPE_L2
| (8 << 16);
2231 cnic_ctx_wr(dev
, cid_addr
, offset1
, val
);
2233 txbd
= (struct tx_bd
*) cp
->l2_ring
;
2235 buf_map
= cp
->l2_buf_map
;
2236 for (i
= 0; i
< MAX_TX_DESC_CNT
; i
++, txbd
++) {
2237 txbd
->tx_bd_haddr_hi
= (u64
) buf_map
>> 32;
2238 txbd
->tx_bd_haddr_lo
= (u64
) buf_map
& 0xffffffff;
2240 val
= (u64
) cp
->l2_ring_map
>> 32;
2241 cnic_ctx_wr(dev
, cid_addr
, offset2
, val
);
2242 txbd
->tx_bd_haddr_hi
= val
;
2244 val
= (u64
) cp
->l2_ring_map
& 0xffffffff;
2245 cnic_ctx_wr(dev
, cid_addr
, offset3
, val
);
2246 txbd
->tx_bd_haddr_lo
= val
;
2249 static void cnic_init_bnx2_rx_ring(struct cnic_dev
*dev
)
2251 struct cnic_local
*cp
= dev
->cnic_priv
;
2252 struct cnic_eth_dev
*ethdev
= cp
->ethdev
;
2253 u32 cid_addr
, sb_id
, val
, coal_reg
, coal_val
;
2256 struct status_block
*s_blk
= cp
->status_blk
;
2258 sb_id
= cp
->status_blk_num
;
2259 cnic_init_context(dev
, 2);
2260 cp
->rx_cons_ptr
= &s_blk
->status_rx_quick_consumer_index2
;
2261 coal_reg
= BNX2_HC_COMMAND
;
2262 coal_val
= CNIC_RD(dev
, coal_reg
);
2263 if (ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
) {
2264 struct status_block_msix
*sblk
= cp
->status_blk
;
2266 cp
->rx_cons_ptr
= &sblk
->status_rx_quick_consumer_index
;
2267 coal_reg
= BNX2_HC_COALESCE_NOW
;
2268 coal_val
= 1 << (11 + sb_id
);
2271 while (!(*cp
->rx_cons_ptr
!= 0) && i
< 10) {
2272 CNIC_WR(dev
, coal_reg
, coal_val
);
2277 cp
->rx_cons
= *cp
->rx_cons_ptr
;
2279 cid_addr
= GET_CID_ADDR(2);
2280 val
= BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE
|
2281 BNX2_L2CTX_CTX_TYPE_SIZE_L2
| (0x02 << 8);
2282 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_CTX_TYPE
, val
);
2285 val
= 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT
;
2287 val
= BNX2_L2CTX_STATUSB_NUM(sb_id
);
2288 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_HOST_BDIDX
, val
);
2290 rxbd
= (struct rx_bd
*) (cp
->l2_ring
+ BCM_PAGE_SIZE
);
2291 for (i
= 0; i
< MAX_RX_DESC_CNT
; i
++, rxbd
++) {
2293 int n
= (i
% cp
->l2_rx_ring_size
) + 1;
2295 buf_map
= cp
->l2_buf_map
+ (n
* cp
->l2_single_buf_size
);
2296 rxbd
->rx_bd_len
= cp
->l2_single_buf_size
;
2297 rxbd
->rx_bd_flags
= RX_BD_FLAGS_START
| RX_BD_FLAGS_END
;
2298 rxbd
->rx_bd_haddr_hi
= (u64
) buf_map
>> 32;
2299 rxbd
->rx_bd_haddr_lo
= (u64
) buf_map
& 0xffffffff;
2301 val
= (u64
) (cp
->l2_ring_map
+ BCM_PAGE_SIZE
) >> 32;
2302 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_NX_BDHADDR_HI
, val
);
2303 rxbd
->rx_bd_haddr_hi
= val
;
2305 val
= (u64
) (cp
->l2_ring_map
+ BCM_PAGE_SIZE
) & 0xffffffff;
2306 cnic_ctx_wr(dev
, cid_addr
, BNX2_L2CTX_NX_BDHADDR_LO
, val
);
2307 rxbd
->rx_bd_haddr_lo
= val
;
2309 val
= cnic_reg_rd_ind(dev
, BNX2_RXP_SCRATCH_RXP_FLOOD
);
2310 cnic_reg_wr_ind(dev
, BNX2_RXP_SCRATCH_RXP_FLOOD
, val
| (1 << 2));
2313 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev
*dev
)
2315 struct kwqe
*wqes
[1], l2kwqe
;
2317 memset(&l2kwqe
, 0, sizeof(l2kwqe
));
2319 l2kwqe
.kwqe_op_flag
= (L2_LAYER_CODE
<< KWQE_FLAGS_LAYER_SHIFT
) |
2320 (L2_KWQE_OPCODE_VALUE_FLUSH
<<
2321 KWQE_OPCODE_SHIFT
) | 2;
2322 dev
->submit_kwqes(dev
, wqes
, 1);
2325 static void cnic_set_bnx2_mac(struct cnic_dev
*dev
)
2327 struct cnic_local
*cp
= dev
->cnic_priv
;
2330 val
= cp
->func
<< 2;
2332 cp
->shmem_base
= cnic_reg_rd_ind(dev
, BNX2_SHM_HDR_ADDR_0
+ val
);
2334 val
= cnic_reg_rd_ind(dev
, cp
->shmem_base
+
2335 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER
);
2336 dev
->mac_addr
[0] = (u8
) (val
>> 8);
2337 dev
->mac_addr
[1] = (u8
) val
;
2339 CNIC_WR(dev
, BNX2_EMAC_MAC_MATCH4
, val
);
2341 val
= cnic_reg_rd_ind(dev
, cp
->shmem_base
+
2342 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER
);
2343 dev
->mac_addr
[2] = (u8
) (val
>> 24);
2344 dev
->mac_addr
[3] = (u8
) (val
>> 16);
2345 dev
->mac_addr
[4] = (u8
) (val
>> 8);
2346 dev
->mac_addr
[5] = (u8
) val
;
2348 CNIC_WR(dev
, BNX2_EMAC_MAC_MATCH5
, val
);
2350 val
= 4 | BNX2_RPM_SORT_USER2_BC_EN
;
2351 if (CHIP_NUM(cp
) != CHIP_NUM_5709
)
2352 val
|= BNX2_RPM_SORT_USER2_PROM_VLAN
;
2354 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, 0x0);
2355 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, val
);
2356 CNIC_WR(dev
, BNX2_RPM_SORT_USER2
, val
| BNX2_RPM_SORT_USER2_ENA
);
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
		       dev->netdev->name);
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
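
/* Ring helpers used by the uio interface; only bnx2-class devices have
 * L2 rings managed here.
 */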
static void cnic_init_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	}
}
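
/* Hook this cnic device into the underlying bnx2 ethernet driver through
 * its drv_register_cnic() callback, unless it is already registered.
 */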
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		printk(KERN_ERR PFX "%s: register_cnic failed\n",
		       dev->netdev->name);

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	ethdev->drv_unregister_cnic(dev->netdev);
}
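
/* Common bring-up path: take over the ethernet driver's register view and
 * status block, allocate resources, start the chip-specific hardware and
 * the connection manager, then enable interrupts.
 */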
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: allocate resource failure\n",
		       dev->netdev->name);
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}
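
/* Tear down the bnx2 kernel queues: quiesce the interrupt, clear the CP
 * and COM doorbells, reset the kernel queue contexts, and free resources.
 */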
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);

	cnic_free_resc(dev);
}
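
/* Common tear-down path: mark the device down, detach the L4 ULP, shut
 * down the connection manager, stop the chip-specific hardware and drop
 * the PCI device reference.
 */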
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}
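
/* Wait briefly for outstanding references to drop, then release the
 * netdev reference and free the device structure.
 */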
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);

	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
	dev_put(dev->netdev);
	kfree(dev);
}
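
/* Allocate a cnic_dev together with its cnic_local private area in a
 * single allocation and fill in the generic defaults and entry points.
 */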
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
		       dev->name);
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);

	return cdev;
}
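
/* Probe the bnx2 driver for a cnic-capable device and, if found, allocate
 * a bnx2-class cnic_dev and wire up the bnx2-specific operations.
 */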
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;

	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
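
/* Check whether a net_device is driven by a supported driver and, if so,
 * create a cnic device for it and add it to the global device list.
 */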
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};
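
/* Stop and free every cnic device still on the global list; called when
 * the module exits or fails to initialize.
 */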
static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	printk(KERN_INFO "%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	return;
}

module_init(cnic_init);
module_exit(cnic_exit);