/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
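
/*
 * UIO open/close handlers for the userspace L2 interface (consumed by
 * userspace, e.g. the iscsiuio daemon).  Opening requires CAP_NET_ADMIN,
 * is limited to one opener at a time, and resets the L2 rings so the new
 * owner starts from a clean state.
 */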
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
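
/*
 * The helpers below do not touch hardware directly: each one packages a
 * request into a struct drv_ctl_info and hands it to the owning bnx2 or
 * bnx2x netdriver through its drv_ctl() callback, which performs the
 * actual context-memory write, ring control, or indirect register access.
 */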
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
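
/*
 * Send an iSCSI netlink message up to userspace.  With a csk this is a
 * PATH_REQ carrying the destination IP, VLAN and MTU so userspace can
 * resolve the path and reply via cnic_iscsi_nl_msg_recv(); PATH_REQ
 * sends are retried up to 3 times.  Without a csk it signals IF_DOWN.
 * Note that past the initial -ENODEV check the return value is always 0,
 * so callers do not learn about delivery failures.
 */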
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
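
/*
 * ULP (upper layer protocol) registration: drivers such as bnx2i and
 * bnx2fc register a cnic_ulp_ops in the global cnic_ulp_tbl, and each
 * cnic_dev then binds to it via cnic_register_device().  The table is
 * RCU-protected; writers hold cnic_lock.
 */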
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	/* dev is not a valid device pointer after the loop above, so warn
	 * without referencing it.
	 */
	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
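
/*
 * cnic_alloc_new_id() below is a round-robin allocator: it searches the
 * bitmap from ->next, wraps to the beginning if the tail is full, and
 * advances ->next past the allocated ID.  The (id + 1) & (max - 1) step
 * assumes ->max is a power of 2.
 */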
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
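
/*
 * A chip page table stores one 64-bit DMA address per backing page as two
 * 32-bit words.  The two variants below differ only in word order (high
 * half first vs. low half first); cp->setup_pgtbl is pointed at whichever
 * layout the bound chip expects, selected at init time.
 */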
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
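
/*
 * KCQ allocation: bnx2 devices get a hardware page table for the queue
 * (use_pg_tbl == 1 in the cnic_alloc_dma() call), while bnx2x devices
 * chain the pages in software by overlaying a bnx2x_bd_chain_next on the
 * last KCQE slot of each page, with the final page pointing back to page
 * 0 to close the ring.
 */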
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
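
/*
 * KWQ free-space math: the ring is sized as a power of 2, so the number
 * of entries in flight is (prod - cons) & max_kwq_idx even across index
 * wrap.  For example, with max_kwq_idx == 0xff, kwq_prod_idx == 0x02 and
 * kwq_con_idx == 0xfe give (0x02 - 0xfe) & 0xff == 4 in flight, leaving
 * 0xff - 4 == 0xfb slots available.
 */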
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
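
/*
 * bnx2x work requests are submitted as single 16-byte slow-path elements
 * (SPEs): the header packs the command, hardware CID, connection type and
 * PF id, while the data half carries the DMA address of a per-connection
 * scratch buffer (from cnic_get_kwqe_16_data() above) or other immediate
 * data.  drv_submit_kwqes_16() returns the number of elements consumed,
 * hence the ret == 1 check below.
 */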
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
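
/*
 * The INIT1 work request (struct iscsi_kwqe_init1) carries the global,
 * per-PF iSCSI parameters rather than per-connection state.  The handler
 * derives the task-array, R2T-queue and HQ sizes used later for
 * per-connection allocations, then programs the same limits into the
 * T/U/X/C storm processors' internal RAM.
 */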
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
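
/*
 * Destroying a connection requires a CFC delete ramrod, but the chip
 * needs some settle time after the connection was last used.  If fewer
 * than 2 seconds have passed since ctx->timestamp, the delete is deferred
 * to the delete_task delayed work (which presumably retries it later)
 * and the KCQE reply is sent back immediately with SUCCESS status.
 */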
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
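
/*
 * A TCP connect request arrives as 2 KWQEs (IPv4) or 3 (IPv6): req1 with
 * addresses, ports and flags, an optional req2 carrying the upper IPv6
 * address words, and req3 with PMTU, buffer sizes and keepalive
 * parameters; *work reports back how many were consumed.  The parameters
 * are marshalled into the per-connection xstorm/tstorm buffers and
 * submitted as a TCP_CONNECT ramrod.
 */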
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
2086 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2088 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2089 union l5cm_specific_data l5_data;
2090 int ret;
2092 memset(&l5_data, 0, sizeof(l5_data));
2093 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2094 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2095 return ret;
2098 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2100 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2101 union l5cm_specific_data l5_data;
2102 int ret;
2104 memset(&l5_data, 0, sizeof(l5_data));
2105 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2106 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2107 return ret;
2109 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2111 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2112 struct l4_kcq kcqe;
2113 struct kcqe *cqes[1];
2115 memset(&kcqe, 0, sizeof(kcqe));
2116 kcqe.pg_host_opaque = req->host_opaque;
2117 kcqe.pg_cid = req->host_opaque;
2118 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2119 cqes[0] = (struct kcqe *) &kcqe;
2120 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2121 return 0;
2124 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2126 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2127 struct l4_kcq kcqe;
2128 struct kcqe *cqes[1];
2130 memset(&kcqe, 0, sizeof(kcqe));
2131 kcqe.pg_host_opaque = req->pg_host_opaque;
2132 kcqe.pg_cid = req->pg_cid;
2133 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2134 cqes[0] = (struct kcqe *) &kcqe;
2135 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2136 return 0;
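/* FCoE KWQE handlers.  Most of them copy the caller's KWQE(s) into
 * a ramrod parameter buffer obtained from cnic_get_kwqe_16_data()
 * and then submit the matching FCoE ramrod.
 */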
2139 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2141 struct fcoe_kwqe_stat *req;
2142 struct fcoe_stat_ramrod_params *fcoe_stat;
2143 union l5cm_specific_data l5_data;
2144 struct cnic_local *cp = dev->cnic_priv;
2145 int ret;
2146 u32 cid;
2148 req = (struct fcoe_kwqe_stat *) kwqe;
2149 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2151 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2152 if (!fcoe_stat)
2153 return -ENOMEM;
2155 memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2156 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2158 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
2159 FCOE_CONNECTION_TYPE, &l5_data);
2160 return ret;
2163 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2164 u32 num, int *work)
2166 int ret;
2167 struct cnic_local *cp = dev->cnic_priv;
2168 u32 cid;
2169 struct fcoe_init_ramrod_params *fcoe_init;
2170 struct fcoe_kwqe_init1 *req1;
2171 struct fcoe_kwqe_init2 *req2;
2172 struct fcoe_kwqe_init3 *req3;
2173 union l5cm_specific_data l5_data;
2175 if (num < 3) {
2176 *work = num;
2177 return -EINVAL;
2179 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2180 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2181 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2182 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2183 *work = 1;
2184 return -EINVAL;
2186 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2187 *work = 2;
2188 return -EINVAL;
2191 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2192 netdev_err(dev->netdev, "fcoe_init size too big\n");
2193 return -ENOMEM;
2195 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2196 if (!fcoe_init)
2197 return -ENOMEM;
2199 memset(fcoe_init, 0, sizeof(*fcoe_init));
2200 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2201 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2202 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2203 fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
2204 fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
2205 fcoe_init->eq_next_page_addr.lo =
2206 cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
2207 fcoe_init->eq_next_page_addr.hi =
2208 (u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
2210 fcoe_init->sb_num = cp->status_blk_num;
2211 fcoe_init->eq_prod = MAX_KCQ_IDX;
2212 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2213 cp->kcq2.sw_prod_idx = 0;
2215 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2216 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
2217 FCOE_CONNECTION_TYPE, &l5_data);
2218 *work = 3;
2219 return ret;
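/* Offload an FCoE connection.  This takes four consecutive KWQEs.
 * If resource allocation fails, any allocated connection resources
 * are freed and a CTX_ALLOC_FAILURE KCQE is generated locally so
 * that the ULP still sees a completion.
 */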
2222 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2223 u32 num, int *work)
2225 int ret = 0;
2226 u32 cid = -1, l5_cid;
2227 struct cnic_local *cp = dev->cnic_priv;
2228 struct fcoe_kwqe_conn_offload1 *req1;
2229 struct fcoe_kwqe_conn_offload2 *req2;
2230 struct fcoe_kwqe_conn_offload3 *req3;
2231 struct fcoe_kwqe_conn_offload4 *req4;
2232 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2233 struct cnic_context *ctx;
2234 struct fcoe_context *fctx;
2235 struct regpair ctx_addr;
2236 union l5cm_specific_data l5_data;
2237 struct fcoe_kcqe kcqe;
2238 struct kcqe *cqes[1];
2240 if (num < 4) {
2241 *work = num;
2242 return -EINVAL;
2244 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2245 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2246 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2247 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2249 *work = 4;
2251 l5_cid = req1->fcoe_conn_id;
2252 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2253 goto err_reply;
2255 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2257 ctx = &cp->ctx_tbl[l5_cid];
2258 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2259 goto err_reply;
2261 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2262 if (ret) {
2263 ret = 0;
2264 goto err_reply;
2266 cid = ctx->cid;
2268 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2269 if (fctx) {
2270 u32 hw_cid = BNX2X_HW_CID(cp, cid);
2271 u32 val;
2273 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2274 FCOE_CONNECTION_TYPE);
2275 fctx->xstorm_ag_context.cdu_reserved = val;
2276 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2277 FCOE_CONNECTION_TYPE);
2278 fctx->ustorm_ag_context.cdu_usage = val;
2280 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2281 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2282 goto err_reply;
2284 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2285 if (!fcoe_offload)
2286 goto err_reply;
2288 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2289 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2290 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2291 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2292 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2294 cid = BNX2X_HW_CID(cp, cid);
2295 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2296 FCOE_CONNECTION_TYPE, &l5_data);
2297 if (!ret)
2298 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2300 return ret;
2302 err_reply:
2303 if (cid != -1)
2304 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2306 memset(&kcqe, 0, sizeof(kcqe));
2307 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2308 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2309 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2311 cqes[0] = (struct kcqe *) &kcqe;
2312 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2313 return ret;
2316 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2318 struct fcoe_kwqe_conn_enable_disable *req;
2319 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2320 union l5cm_specific_data l5_data;
2321 int ret;
2322 u32 cid, l5_cid;
2323 struct cnic_local *cp = dev->cnic_priv;
2325 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2326 cid = req->context_id;
2327 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2329 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2330 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2331 return -ENOMEM;
2333 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2334 if (!fcoe_enable)
2335 return -ENOMEM;
2337 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2338 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2339 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2340 FCOE_CONNECTION_TYPE, &l5_data);
2341 return ret;
2344 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2346 struct fcoe_kwqe_conn_enable_disable *req;
2347 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2348 union l5cm_specific_data l5_data;
2349 int ret;
2350 u32 cid, l5_cid;
2351 struct cnic_local *cp = dev->cnic_priv;
2353 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2354 cid = req->context_id;
2355 l5_cid = req->conn_id;
2356 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2357 return -EINVAL;
2359 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2361 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2362 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2363 return -ENOMEM;
2365 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2366 if (!fcoe_disable)
2367 return -ENOMEM;
2369 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2370 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2371 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2372 FCOE_CONNECTION_TYPE, &l5_data);
2373 return ret;
2376 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2378 struct fcoe_kwqe_conn_destroy *req;
2379 union l5cm_specific_data l5_data;
2380 int ret;
2381 u32 cid, l5_cid;
2382 struct cnic_local *cp = dev->cnic_priv;
2383 struct cnic_context *ctx;
2384 struct fcoe_kcqe kcqe;
2385 struct kcqe *cqes[1];
2387 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2388 cid = req->context_id;
2389 l5_cid = req->conn_id;
2390 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2391 return -EINVAL;
2393 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2395 ctx = &cp->ctx_tbl[l5_cid];
2397 init_waitqueue_head(&ctx->waitq);
2398 ctx->wait_cond = 0;
2400 memset(&l5_data, 0, sizeof(l5_data));
2401 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2402 FCOE_CONNECTION_TYPE, &l5_data);
2403 if (ret == 0) {
2404 wait_event(ctx->waitq, ctx->wait_cond);
2405 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2406 queue_delayed_work(cnic_wq, &cp->delete_task,
2407 msecs_to_jiffies(2000));
2410 memset(&kcqe, 0, sizeof(kcqe));
2411 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2412 kcqe.fcoe_conn_id = req->conn_id;
2413 kcqe.fcoe_conn_context_id = cid;
2415 cqes[0] = (struct kcqe *) &kcqe;
2416 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2417 return ret;
2420 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2422 struct fcoe_kwqe_destroy *req;
2423 union l5cm_specific_data l5_data;
2424 struct cnic_local *cp = dev->cnic_priv;
2425 int ret;
2426 u32 cid;
2428 req = (struct fcoe_kwqe_destroy *) kwqe;
2429 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2431 memset(&l5_data, 0, sizeof(l5_data));
2432 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
2433 FCOE_CONNECTION_TYPE, &l5_data);
2434 return ret;
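/* Dispatch loop for iSCSI and L4 KWQEs.  Handlers that consume more
 * than one KWQE report the count through work; a failing KWQE is
 * logged and the loop continues with the next one.
 */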
2437 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2438 struct kwqe *wqes[], u32 num_wqes)
2440 int i, work, ret;
2441 u32 opcode;
2442 struct kwqe *kwqe;
2444 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2445 return -EAGAIN; /* bnx2x is down */
2447 for (i = 0; i < num_wqes; ) {
2448 kwqe = wqes[i];
2449 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2450 work = 1;
2452 switch (opcode) {
2453 case ISCSI_KWQE_OPCODE_INIT1:
2454 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2455 break;
2456 case ISCSI_KWQE_OPCODE_INIT2:
2457 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2458 break;
2459 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2460 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2461 num_wqes - i, &work);
2462 break;
2463 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2464 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2465 break;
2466 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2467 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2468 break;
2469 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2470 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2471 &work);
2472 break;
2473 case L4_KWQE_OPCODE_VALUE_CLOSE:
2474 ret = cnic_bnx2x_close(dev, kwqe);
2475 break;
2476 case L4_KWQE_OPCODE_VALUE_RESET:
2477 ret = cnic_bnx2x_reset(dev, kwqe);
2478 break;
2479 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2480 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2481 break;
2482 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2483 ret = cnic_bnx2x_update_pg(dev, kwqe);
2484 break;
2485 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2486 ret = 0;
2487 break;
2488 default:
2489 ret = 0;
2490 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2491 opcode);
2492 break;
2494 if (ret < 0)
2495 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2496 opcode);
2497 i += work;
2499 return 0;
2502 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2503 struct kwqe *wqes[], u32 num_wqes)
2505 struct cnic_local *cp = dev->cnic_priv;
2506 int i, work, ret;
2507 u32 opcode;
2508 struct kwqe *kwqe;
2510 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2511 return -EAGAIN; /* bnx2x is down */
2513 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
2514 return -EINVAL;
2516 for (i = 0; i < num_wqes; ) {
2517 kwqe = wqes[i];
2518 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2519 work = 1;
2521 switch (opcode) {
2522 case FCOE_KWQE_OPCODE_INIT1:
2523 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2524 num_wqes - i, &work);
2525 break;
2526 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2527 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2528 num_wqes - i, &work);
2529 break;
2530 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2531 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2532 break;
2533 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2534 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2535 break;
2536 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2537 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2538 break;
2539 case FCOE_KWQE_OPCODE_DESTROY:
2540 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2541 break;
2542 case FCOE_KWQE_OPCODE_STAT:
2543 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2544 break;
2545 default:
2546 ret = 0;
2547 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2548 opcode);
2549 break;
2551 if (ret < 0)
2552 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2553 opcode);
2554 i += work;
2556 return 0;
2559 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2560 u32 num_wqes)
2562 int ret = -EINVAL;
2563 u32 layer_code;
2565 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2566 return -EAGAIN; /* bnx2x is down */
2568 if (!num_wqes)
2569 return 0;
2571 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2572 switch (layer_code) {
2573 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2574 case KWQE_FLAGS_LAYER_MASK_L4:
2575 case KWQE_FLAGS_LAYER_MASK_L2:
2576 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2577 break;
2579 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2580 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2581 break;
2583 return ret;
2586 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2588 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2589 return KCQE_FLAGS_LAYER_MASK_L4;
2591 return opflag & KCQE_FLAGS_LAYER_MASK;
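/* Hand completed KCQEs to the upper layer drivers.  Consecutive
 * KCQEs that share the same layer mask are batched into a single
 * indicate_kcqes() call; ramrod completions are counted so that the
 * slow path queue credits can be returned at the end.
 */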
2594 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2596 struct cnic_local *cp = dev->cnic_priv;
2597 int i, j, comp = 0;
2599 i = 0;
2600 j = 1;
2601 while (num_cqes) {
2602 struct cnic_ulp_ops *ulp_ops;
2603 int ulp_type;
2604 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2605 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2607 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2608 comp++;
2610 while (j < num_cqes) {
2611 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2613 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2614 break;
2616 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2617 comp++;
2618 j++;
2621 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2622 ulp_type = CNIC_ULP_RDMA;
2623 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2624 ulp_type = CNIC_ULP_ISCSI;
2625 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2626 ulp_type = CNIC_ULP_FCOE;
2627 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2628 ulp_type = CNIC_ULP_L4;
2629 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2630 goto end;
2631 else {
2632 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2633 kcqe_op_flag);
2634 goto end;
2637 rcu_read_lock();
2638 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2639 if (likely(ulp_ops)) {
2640 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2641 cp->completed_kcq + i, j);
2643 rcu_read_unlock();
2644 end:
2645 num_cqes -= j;
2646 i += j;
2647 j = 1;
2649 if (unlikely(comp))
2650 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2653 static u16 cnic_bnx2_next_idx(u16 idx)
2655 return idx + 1;
2658 static u16 cnic_bnx2_hw_idx(u16 idx)
2660 return idx;
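/* On bnx2x the last KCQE slot of each page is not usable for
 * completions (presumably reserved for the next-page pointer), so
 * both index helpers skip any index whose low bits equal
 * MAX_KCQE_CNT.
 */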
2663 static u16 cnic_bnx2x_next_idx(u16 idx)
2665 idx++;
2666 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2667 idx++;
2669 return idx;
2672 static u16 cnic_bnx2x_hw_idx(u16 idx)
2674 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2675 idx++;
2676 return idx;
2679 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2681 struct cnic_local *cp = dev->cnic_priv;
2682 u16 i, ri, hw_prod, last;
2683 struct kcqe *kcqe;
2684 int kcqe_cnt = 0, last_cnt = 0;
2686 i = ri = last = info->sw_prod_idx;
2687 ri &= MAX_KCQ_IDX;
2688 hw_prod = *info->hw_prod_idx_ptr;
2689 hw_prod = cp->hw_idx(hw_prod);
2691 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2692 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2693 cp->completed_kcq[kcqe_cnt++] = kcqe;
2694 i = cp->next_idx(i);
2695 ri = i & MAX_KCQ_IDX;
2696 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2697 last_cnt = kcqe_cnt;
2698 last = i;
2702 info->sw_prod_idx = last;
2703 return last_cnt;
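/* Scan the bnx2x L2 receive completion queue for ramrod CQEs and
 * count CLIENT_SETUP and HALT completions.  Returns 0 right away on
 * non-bnx2x devices.
 */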
2706 static int cnic_l2_completion(struct cnic_local *cp)
2708 u16 hw_cons, sw_cons;
2709 struct cnic_uio_dev *udev = cp->udev;
2710 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2711 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2712 u32 cmd;
2713 int comp = 0;
2715 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2716 return 0;
2718 hw_cons = *cp->rx_cons_ptr;
2719 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2720 hw_cons++;
2722 sw_cons = cp->rx_cons;
2723 while (sw_cons != hw_cons) {
2724 u8 cqe_fp_flags;
2726 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2727 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2728 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2729 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2730 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2731 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2732 cmd == RAMROD_CMD_ID_ETH_HALT)
2733 comp++;
2735 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2737 return comp;
2740 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2742 u16 rx_cons, tx_cons;
2743 int comp = 0;
2745 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2746 return;
2748 rx_cons = *cp->rx_cons_ptr;
2749 tx_cons = *cp->tx_cons_ptr;
2750 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2751 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2752 comp = cnic_l2_completion(cp);
2754 cp->tx_cons = tx_cons;
2755 cp->rx_cons = rx_cons;
2757 if (cp->udev)
2758 uio_event_notify(&cp->udev->cnic_uinfo);
2760 if (comp)
2761 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2764 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2766 struct cnic_local *cp = dev->cnic_priv;
2767 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2768 int kcqe_cnt;
2770 /* status block index must be read before reading other fields */
2771 rmb();
2772 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2774 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2776 service_kcqes(dev, kcqe_cnt);
2778 /* Tell compiler that status_blk fields can change. */
2779 barrier();
2780 if (status_idx != *cp->kcq1.status_idx_ptr) {
2781 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2782 /* status block index must be read first */
2783 rmb();
2784 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2785 } else
2786 break;
2789 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2791 cnic_chk_pkt_rings(cp);
2793 return status_idx;
2796 static int cnic_service_bnx2(void *data, void *status_blk)
2798 struct cnic_dev *dev = data;
2800 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2801 struct status_block *sblk = status_blk;
2803 return sblk->status_idx;
2806 return cnic_service_bnx2_queues(dev);
2809 static void cnic_service_bnx2_msix(unsigned long data)
2811 struct cnic_dev *dev = (struct cnic_dev *) data;
2812 struct cnic_local *cp = dev->cnic_priv;
2814 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2816 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2817 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2820 static void cnic_doirq(struct cnic_dev *dev)
2822 struct cnic_local *cp = dev->cnic_priv;
2824 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2825 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2827 prefetch(cp->status_blk.gen);
2828 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2830 tasklet_schedule(&cp->cnic_irq_task);
2834 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2836 struct cnic_dev *dev = dev_instance;
2837 struct cnic_local *cp = dev->cnic_priv;
2839 if (cp->ack_int)
2840 cp->ack_int(dev);
2842 cnic_doirq(dev);
2844 return IRQ_HANDLED;
2847 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2848 u16 index, u8 op, u8 update)
2850 struct cnic_local *cp = dev->cnic_priv;
2851 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2852 COMMAND_REG_INT_ACK);
2853 struct igu_ack_register igu_ack;
2855 igu_ack.status_block_index = index;
2856 igu_ack.sb_id_and_flags =
2857 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2858 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2859 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2860 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2862 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2865 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2866 u16 index, u8 op, u8 update)
2868 struct igu_regular cmd_data;
2869 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2871 cmd_data.sb_id_and_flags =
2872 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2873 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2874 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2875 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2878 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2881 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2883 struct cnic_local *cp = dev->cnic_priv;
2885 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2886 IGU_INT_DISABLE, 0);
2889 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2891 struct cnic_local *cp = dev->cnic_priv;
2893 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2894 IGU_INT_DISABLE, 0);
2897 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2899 u32 last_status = *info->status_idx_ptr;
2900 int kcqe_cnt;
2902 /* status block index must be read before reading the KCQ */
2903 rmb();
2904 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2906 service_kcqes(dev, kcqe_cnt);
2908 /* Tell compiler that sblk fields can change. */
2909 barrier();
2910 if (last_status == *info->status_idx_ptr)
2911 break;
2913 last_status = *info->status_idx_ptr;
2914 /* status block index must be read before reading the KCQ */
2915 rmb();
2917 return last_status;
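/* Bottom half for bnx2x: drain kcq1, and on E2 also kcq2.  On E2
 * the loop restarts when the status index moved while kcq2 was
 * being serviced, so no completion is left behind before the IGU
 * interrupt is re-enabled.
 */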
2920 static void cnic_service_bnx2x_bh(unsigned long data)
2922 struct cnic_dev *dev = (struct cnic_dev *) data;
2923 struct cnic_local *cp = dev->cnic_priv;
2924 u32 status_idx, new_status_idx;
2926 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2927 return;
2929 while (1) {
2930 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2932 CNIC_WR16(dev, cp->kcq1.io_addr,
2933 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2935 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2936 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2937 status_idx, IGU_INT_ENABLE, 1);
2938 break;
2941 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2943 if (new_status_idx != status_idx)
2944 continue;
2946 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2947 MAX_KCQ_IDX);
2949 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2950 status_idx, IGU_INT_ENABLE, 1);
2952 break;
2956 static int cnic_service_bnx2x(void *data, void *status_blk)
2958 struct cnic_dev *dev = data;
2959 struct cnic_local *cp = dev->cnic_priv;
2961 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2962 cnic_doirq(dev);
2964 cnic_chk_pkt_rings(cp);
2966 return 0;
2969 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2971 struct cnic_ulp_ops *ulp_ops;
2973 if (if_type == CNIC_ULP_ISCSI)
2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2976 mutex_lock(&cnic_lock);
2977 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2978 lockdep_is_held(&cnic_lock));
2979 if (!ulp_ops) {
2980 mutex_unlock(&cnic_lock);
2981 return;
2983 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2984 mutex_unlock(&cnic_lock);
2986 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2987 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2989 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2992 static void cnic_ulp_stop(struct cnic_dev *dev)
2994 struct cnic_local *cp = dev->cnic_priv;
2995 int if_type;
2997 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
2998 cnic_ulp_stop_one(cp, if_type);
3001 static void cnic_ulp_start(struct cnic_dev *dev)
3003 struct cnic_local *cp = dev->cnic_priv;
3004 int if_type;
3006 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3007 struct cnic_ulp_ops *ulp_ops;
3009 mutex_lock(&cnic_lock);
3010 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3011 lockdep_is_held(&cnic_lock));
3012 if (!ulp_ops || !ulp_ops->cnic_start) {
3013 mutex_unlock(&cnic_lock);
3014 continue;
3016 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3017 mutex_unlock(&cnic_lock);
3019 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3020 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3022 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3026 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3028 struct cnic_dev *dev = data;
3030 switch (info->cmd) {
3031 case CNIC_CTL_STOP_CMD:
3032 cnic_hold(dev);
3034 cnic_ulp_stop(dev);
3035 cnic_stop_hw(dev);
3037 cnic_put(dev);
3038 break;
3039 case CNIC_CTL_START_CMD:
3040 cnic_hold(dev);
3042 if (!cnic_start_hw(dev))
3043 cnic_ulp_start(dev);
3045 cnic_put(dev);
3046 break;
3047 case CNIC_CTL_STOP_ISCSI_CMD: {
3048 struct cnic_local *cp = dev->cnic_priv;
3049 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3050 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3051 break;
3053 case CNIC_CTL_COMPLETION_CMD: {
3054 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3055 u32 l5_cid;
3056 struct cnic_local *cp = dev->cnic_priv;
3058 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3059 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3061 ctx->wait_cond = 1;
3062 wake_up(&ctx->waitq);
3064 break;
3066 default:
3067 return -EINVAL;
3069 return 0;
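/* The ulp_ops table is sampled under cnic_lock, and ulp_get() pins
 * the ops across the cnic_init()/cnic_exit() callback, which runs
 * with the lock dropped.
 */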
3072 static void cnic_ulp_init(struct cnic_dev *dev)
3074 int i;
3075 struct cnic_local *cp = dev->cnic_priv;
3077 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3078 struct cnic_ulp_ops *ulp_ops;
3080 mutex_lock(&cnic_lock);
3081 ulp_ops = cnic_ulp_tbl_prot(i);
3082 if (!ulp_ops || !ulp_ops->cnic_init) {
3083 mutex_unlock(&cnic_lock);
3084 continue;
3086 ulp_get(ulp_ops);
3087 mutex_unlock(&cnic_lock);
3089 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3090 ulp_ops->cnic_init(dev);
3092 ulp_put(ulp_ops);
3096 static void cnic_ulp_exit(struct cnic_dev *dev)
3098 int i;
3099 struct cnic_local *cp = dev->cnic_priv;
3101 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3102 struct cnic_ulp_ops *ulp_ops;
3104 mutex_lock(&cnic_lock);
3105 ulp_ops = cnic_ulp_tbl_prot(i);
3106 if (!ulp_ops || !ulp_ops->cnic_exit) {
3107 mutex_unlock(&cnic_lock);
3108 continue;
3110 ulp_get(ulp_ops);
3111 mutex_unlock(&cnic_lock);
3113 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3114 ulp_ops->cnic_exit(dev);
3116 ulp_put(ulp_ops);
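/* Build an OFFLOAD_PG KWQE describing the L2 path of a connection:
 * destination and source MAC, EtherType, and, when a VLAN is in
 * use, the VLAN tag (which grows the L2 header by 4 bytes).
 */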
3120 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3122 struct cnic_dev *dev = csk->dev;
3123 struct l4_kwq_offload_pg *l4kwqe;
3124 struct kwqe *wqes[1];
3126 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3127 memset(l4kwqe, 0, sizeof(*l4kwqe));
3128 wqes[0] = (struct kwqe *) l4kwqe;
3130 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3131 l4kwqe->flags =
3132 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3133 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3135 l4kwqe->da0 = csk->ha[0];
3136 l4kwqe->da1 = csk->ha[1];
3137 l4kwqe->da2 = csk->ha[2];
3138 l4kwqe->da3 = csk->ha[3];
3139 l4kwqe->da4 = csk->ha[4];
3140 l4kwqe->da5 = csk->ha[5];
3142 l4kwqe->sa0 = dev->mac_addr[0];
3143 l4kwqe->sa1 = dev->mac_addr[1];
3144 l4kwqe->sa2 = dev->mac_addr[2];
3145 l4kwqe->sa3 = dev->mac_addr[3];
3146 l4kwqe->sa4 = dev->mac_addr[4];
3147 l4kwqe->sa5 = dev->mac_addr[5];
3149 l4kwqe->etype = ETH_P_IP;
3150 l4kwqe->ipid_start = DEF_IPID_START;
3151 l4kwqe->host_opaque = csk->l5_cid;
3153 if (csk->vlan_id) {
3154 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3155 l4kwqe->vlan_tag = csk->vlan_id;
3156 l4kwqe->l2hdr_nbytes += 4;
3159 return dev->submit_kwqes(dev, wqes, 1);
3162 static int cnic_cm_update_pg(struct cnic_sock *csk)
3164 struct cnic_dev *dev = csk->dev;
3165 struct l4_kwq_update_pg *l4kwqe;
3166 struct kwqe *wqes[1];
3168 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3169 memset(l4kwqe, 0, sizeof(*l4kwqe));
3170 wqes[0] = (struct kwqe *) l4kwqe;
3172 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3173 l4kwqe->flags =
3174 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3175 l4kwqe->pg_cid = csk->pg_cid;
3177 l4kwqe->da0 = csk->ha[0];
3178 l4kwqe->da1 = csk->ha[1];
3179 l4kwqe->da2 = csk->ha[2];
3180 l4kwqe->da3 = csk->ha[3];
3181 l4kwqe->da4 = csk->ha[4];
3182 l4kwqe->da5 = csk->ha[5];
3184 l4kwqe->pg_host_opaque = csk->l5_cid;
3185 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3187 return dev->submit_kwqes(dev, wqes, 1);
3190 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3192 struct cnic_dev *dev = csk->dev;
3193 struct l4_kwq_upload *l4kwqe;
3194 struct kwqe *wqes[1];
3196 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3197 memset(l4kwqe, 0, sizeof(*l4kwqe));
3198 wqes[0] = (struct kwqe *) l4kwqe;
3200 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3201 l4kwqe->flags =
3202 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3203 l4kwqe->cid = csk->pg_cid;
3205 return dev->submit_kwqes(dev, wqes, 1);
3208 static int cnic_cm_conn_req(struct cnic_sock *csk)
3210 struct cnic_dev *dev = csk->dev;
3211 struct l4_kwq_connect_req1 *l4kwqe1;
3212 struct l4_kwq_connect_req2 *l4kwqe2;
3213 struct l4_kwq_connect_req3 *l4kwqe3;
3214 struct kwqe *wqes[3];
3215 u8 tcp_flags = 0;
3216 int num_wqes = 2;
3218 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3219 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3220 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3221 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3222 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3223 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3225 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3226 l4kwqe3->flags =
3227 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3228 l4kwqe3->ka_timeout = csk->ka_timeout;
3229 l4kwqe3->ka_interval = csk->ka_interval;
3230 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3231 l4kwqe3->tos = csk->tos;
3232 l4kwqe3->ttl = csk->ttl;
3233 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3234 l4kwqe3->pmtu = csk->mtu;
3235 l4kwqe3->rcv_buf = csk->rcv_buf;
3236 l4kwqe3->snd_buf = csk->snd_buf;
3237 l4kwqe3->seed = csk->seed;
3239 wqes[0] = (struct kwqe *) l4kwqe1;
3240 if (test_bit(SK_F_IPV6, &csk->flags)) {
3241 wqes[1] = (struct kwqe *) l4kwqe2;
3242 wqes[2] = (struct kwqe *) l4kwqe3;
3243 num_wqes = 3;
3245 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3246 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3247 l4kwqe2->flags =
3248 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3249 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3250 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3251 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3252 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3253 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3254 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3255 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3256 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3257 sizeof(struct tcphdr);
3258 } else {
3259 wqes[1] = (struct kwqe *) l4kwqe3;
3260 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3261 sizeof(struct tcphdr);
3264 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3265 l4kwqe1->flags =
3266 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3267 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3268 l4kwqe1->cid = csk->cid;
3269 l4kwqe1->pg_cid = csk->pg_cid;
3270 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3271 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3272 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3273 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3274 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3275 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3276 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3277 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3278 if (csk->tcp_flags & SK_TCP_NAGLE)
3279 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3280 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3281 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3282 if (csk->tcp_flags & SK_TCP_SACK)
3283 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3284 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3285 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3287 l4kwqe1->tcp_flags = tcp_flags;
3289 return dev->submit_kwqes(dev, wqes, num_wqes);
3292 static int cnic_cm_close_req(struct cnic_sock *csk)
3294 struct cnic_dev *dev = csk->dev;
3295 struct l4_kwq_close_req *l4kwqe;
3296 struct kwqe *wqes[1];
3298 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3299 memset(l4kwqe, 0, sizeof(*l4kwqe));
3300 wqes[0] = (struct kwqe *) l4kwqe;
3302 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3303 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3304 l4kwqe->cid = csk->cid;
3306 return dev->submit_kwqes(dev, wqes, 1);
3309 static int cnic_cm_abort_req(struct cnic_sock *csk)
3311 struct cnic_dev *dev = csk->dev;
3312 struct l4_kwq_reset_req *l4kwqe;
3313 struct kwqe *wqes[1];
3315 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3316 memset(l4kwqe, 0, sizeof(*l4kwqe));
3317 wqes[0] = (struct kwqe *) l4kwqe;
3319 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3320 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3321 l4kwqe->cid = csk->cid;
3323 return dev->submit_kwqes(dev, wqes, 1);
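/* Allocate the cnic_sock for l5_cid from the static table: -EAGAIN
 * while a previous user still holds a reference or the context is
 * mid-offload, -EBUSY if the slot is already marked in use.  On
 * success the socket is loaded with the keep-alive and TCP
 * defaults.
 */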
3326 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3327 u32 l5_cid, struct cnic_sock **csk, void *context)
3329 struct cnic_local *cp = dev->cnic_priv;
3330 struct cnic_sock *csk1;
3332 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3333 return -EINVAL;
3335 if (cp->ctx_tbl) {
3336 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3338 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3339 return -EAGAIN;
3342 csk1 = &cp->csk_tbl[l5_cid];
3343 if (atomic_read(&csk1->ref_count))
3344 return -EAGAIN;
3346 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3347 return -EBUSY;
3349 csk1->dev = dev;
3350 csk1->cid = cid;
3351 csk1->l5_cid = l5_cid;
3352 csk1->ulp_type = ulp_type;
3353 csk1->context = context;
3355 csk1->ka_timeout = DEF_KA_TIMEOUT;
3356 csk1->ka_interval = DEF_KA_INTERVAL;
3357 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3358 csk1->tos = DEF_TOS;
3359 csk1->ttl = DEF_TTL;
3360 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3361 csk1->rcv_buf = DEF_RCV_BUF;
3362 csk1->snd_buf = DEF_SND_BUF;
3363 csk1->seed = DEF_SEED;
3365 *csk = csk1;
3366 return 0;
3369 static void cnic_cm_cleanup(struct cnic_sock *csk)
3371 if (csk->src_port) {
3372 struct cnic_dev *dev = csk->dev;
3373 struct cnic_local *cp = dev->cnic_priv;
3375 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3376 csk->src_port = 0;
3380 static void cnic_close_conn(struct cnic_sock *csk)
3382 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3383 cnic_cm_upload_pg(csk);
3384 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3386 cnic_cm_cleanup(csk);
3389 static int cnic_cm_destroy(struct cnic_sock *csk)
3391 if (!cnic_in_use(csk))
3392 return -EINVAL;
3394 csk_hold(csk);
3395 clear_bit(SK_F_INUSE, &csk->flags);
3396 smp_mb__after_clear_bit();
3397 while (atomic_read(&csk->ref_count) != 1)
3398 msleep(1);
3399 cnic_cm_cleanup(csk);
3401 csk->flags = 0;
3402 csk_put(csk);
3403 return 0;
3406 static inline u16 cnic_get_vlan(struct net_device *dev,
3407 struct net_device **vlan_dev)
3409 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3410 *vlan_dev = vlan_dev_real_dev(dev);
3411 return vlan_dev_vlan_id(dev);
3413 *vlan_dev = dev;
3414 return 0;
3417 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3418 struct dst_entry **dst)
3420 #if defined(CONFIG_INET)
3421 struct rtable *rt;
3423 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3424 if (!IS_ERR(rt)) {
3425 *dst = &rt->dst;
3426 return 0;
3428 return PTR_ERR(rt);
3429 #else
3430 return -ENETUNREACH;
3431 #endif
3434 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3435 struct dst_entry **dst)
3437 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3438 struct flowi6 fl6;
3440 memset(&fl6, 0, sizeof(fl6));
3441 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3442 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3443 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3445 *dst = ip6_route_output(&init_net, NULL, &fl6);
3446 if (*dst)
3447 return 0;
3448 #endif
3450 return -ENETUNREACH;
3453 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3454 int ulp_type)
3456 struct cnic_dev *dev = NULL;
3457 struct dst_entry *dst;
3458 struct net_device *netdev = NULL;
3459 int err = -ENETUNREACH;
3461 if (dst_addr->sin_family == AF_INET)
3462 err = cnic_get_v4_route(dst_addr, &dst);
3463 else if (dst_addr->sin_family == AF_INET6) {
3464 struct sockaddr_in6 *dst_addr6 =
3465 (struct sockaddr_in6 *) dst_addr;
3467 err = cnic_get_v6_route(dst_addr6, &dst);
3468 } else
3469 return NULL;
3471 if (err)
3472 return NULL;
3474 if (!dst->dev)
3475 goto done;
3477 cnic_get_vlan(dst->dev, &netdev);
3479 dev = cnic_from_netdev(netdev);
3481 done:
3482 dst_release(dst);
3483 if (dev)
3484 cnic_put(dev);
3485 return dev;
3488 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3490 struct cnic_dev *dev = csk->dev;
3491 struct cnic_local *cp = dev->cnic_priv;
3493 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3496 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3498 struct cnic_dev *dev = csk->dev;
3499 struct cnic_local *cp = dev->cnic_priv;
3500 int is_v6, rc = 0;
3501 struct dst_entry *dst = NULL;
3502 struct net_device *realdev;
3503 __be16 local_port;
3504 u32 port_id;
3506 if (saddr->local.v6.sin6_family == AF_INET6 &&
3507 saddr->remote.v6.sin6_family == AF_INET6)
3508 is_v6 = 1;
3509 else if (saddr->local.v4.sin_family == AF_INET &&
3510 saddr->remote.v4.sin_family == AF_INET)
3511 is_v6 = 0;
3512 else
3513 return -EINVAL;
3515 clear_bit(SK_F_IPV6, &csk->flags);
3517 if (is_v6) {
3518 set_bit(SK_F_IPV6, &csk->flags);
3519 cnic_get_v6_route(&saddr->remote.v6, &dst);
3521 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3522 sizeof(struct in6_addr));
3523 csk->dst_port = saddr->remote.v6.sin6_port;
3524 local_port = saddr->local.v6.sin6_port;
3526 } else {
3527 cnic_get_v4_route(&saddr->remote.v4, &dst);
3529 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3530 csk->dst_port = saddr->remote.v4.sin_port;
3531 local_port = saddr->local.v4.sin_port;
3534 csk->vlan_id = 0;
3535 csk->mtu = dev->netdev->mtu;
3536 if (dst && dst->dev) {
3537 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3538 if (realdev == dev->netdev) {
3539 csk->vlan_id = vlan;
3540 csk->mtu = dst_mtu(dst);
3544 port_id = be16_to_cpu(local_port);
3545 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3546 port_id < CNIC_LOCAL_PORT_MAX) {
3547 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3548 port_id = 0;
3549 } else
3550 port_id = 0;
3552 if (!port_id) {
3553 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3554 if (port_id == -1) {
3555 rc = -ENOMEM;
3556 goto err_out;
3558 local_port = cpu_to_be16(port_id);
3560 csk->src_port = local_port;
3562 err_out:
3563 dst_release(dst);
3564 return rc;
3567 static void cnic_init_csk_state(struct cnic_sock *csk)
3569 csk->state = 0;
3570 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3571 clear_bit(SK_F_CLOSING, &csk->flags);
3574 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3576 struct cnic_local *cp = csk->dev->cnic_priv;
3577 int err = 0;
3579 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3580 return -EOPNOTSUPP;
3582 if (!cnic_in_use(csk))
3583 return -EINVAL;
3585 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3586 return -EINVAL;
3588 cnic_init_csk_state(csk);
3590 err = cnic_get_route(csk, saddr);
3591 if (err)
3592 goto err_out;
3594 err = cnic_resolve_addr(csk, saddr);
3595 if (!err)
3596 return 0;
3598 err_out:
3599 clear_bit(SK_F_CONNECT_START, &csk->flags);
3600 return err;
3603 static int cnic_cm_abort(struct cnic_sock *csk)
3605 struct cnic_local *cp = csk->dev->cnic_priv;
3606 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3608 if (!cnic_in_use(csk))
3609 return -EINVAL;
3611 if (cnic_abort_prep(csk))
3612 return cnic_cm_abort_req(csk);
3614 /* Getting here means that we haven't started connect, or
3615 * connect was not successful.
3616 */
3618 cp->close_conn(csk, opcode);
3619 if (csk->state != opcode)
3620 return -EALREADY;
3622 return 0;
3625 static int cnic_cm_close(struct cnic_sock *csk)
3627 if (!cnic_in_use(csk))
3628 return -EINVAL;
3630 if (cnic_close_prep(csk)) {
3631 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3632 return cnic_cm_close_req(csk);
3633 } else {
3634 return -EALREADY;
3636 return 0;
3639 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3640 u8 opcode)
3642 struct cnic_ulp_ops *ulp_ops;
3643 int ulp_type = csk->ulp_type;
3645 rcu_read_lock();
3646 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3647 if (ulp_ops) {
3648 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3649 ulp_ops->cm_connect_complete(csk);
3650 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3651 ulp_ops->cm_close_complete(csk);
3652 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3653 ulp_ops->cm_remote_abort(csk);
3654 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3655 ulp_ops->cm_abort_complete(csk);
3656 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3657 ulp_ops->cm_remote_close(csk);
3659 rcu_read_unlock();
3662 static int cnic_cm_set_pg(struct cnic_sock *csk)
3664 if (cnic_offld_prep(csk)) {
3665 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3666 cnic_cm_update_pg(csk);
3667 else
3668 cnic_cm_offload_pg(csk);
3670 return 0;
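/* Completion of OFFLOAD_PG/UPDATE_PG.  A context allocation failure
 * is reported to the ULP as a failed CONNECT_COMPLETE; on success
 * the returned pg_cid is saved and the actual connect request is
 * sent.
 */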
3673 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3675 struct cnic_local *cp = dev->cnic_priv;
3676 u32 l5_cid = kcqe->pg_host_opaque;
3677 u8 opcode = kcqe->op_code;
3678 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3680 csk_hold(csk);
3681 if (!cnic_in_use(csk))
3682 goto done;
3684 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3685 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3686 goto done;
3688 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3689 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3690 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3691 cnic_cm_upcall(cp, csk,
3692 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3693 goto done;
3696 csk->pg_cid = kcqe->pg_cid;
3697 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3698 cnic_cm_conn_req(csk);
3700 done:
3701 csk_put(csk);
3704 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3706 struct cnic_local *cp = dev->cnic_priv;
3707 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3708 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3709 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3711 ctx->timestamp = jiffies;
3712 ctx->wait_cond = 1;
3713 wake_up(&ctx->waitq);
3716 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3718 struct cnic_local *cp = dev->cnic_priv;
3719 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3720 u8 opcode = l4kcqe->op_code;
3721 u32 l5_cid;
3722 struct cnic_sock *csk;
3724 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3725 cnic_process_fcoe_term_conn(dev, kcqe);
3726 return;
3728 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3729 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3730 cnic_cm_process_offld_pg(dev, l4kcqe);
3731 return;
3734 l5_cid = l4kcqe->conn_id;
3735 if (opcode & 0x80)
3736 l5_cid = l4kcqe->cid;
3737 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3738 return;
3740 csk = &cp->csk_tbl[l5_cid];
3741 csk_hold(csk);
3743 if (!cnic_in_use(csk)) {
3744 csk_put(csk);
3745 return;
3748 switch (opcode) {
3749 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3750 if (l4kcqe->status != 0) {
3751 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3752 cnic_cm_upcall(cp, csk,
3753 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3755 break;
3756 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3757 if (l4kcqe->status == 0)
3758 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3760 smp_mb__before_clear_bit();
3761 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3762 cnic_cm_upcall(cp, csk, opcode);
3763 break;
3765 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3766 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3767 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3768 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3769 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3770 cp->close_conn(csk, opcode);
3771 break;
3773 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3774 cnic_cm_upcall(cp, csk, opcode);
3775 break;
3777 csk_put(csk);
3780 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3782 struct cnic_dev *dev = data;
3783 int i;
3785 for (i = 0; i < num; i++)
3786 cnic_cm_process_kcqe(dev, kcqe[i]);
3789 static struct cnic_ulp_ops cm_ulp_ops = {
3790 .indicate_kcqes = cnic_cm_indicate_kcqe,
3793 static void cnic_cm_free_mem(struct cnic_dev *dev)
3795 struct cnic_local *cp = dev->cnic_priv;
3797 kfree(cp->csk_tbl);
3798 cp->csk_tbl = NULL;
3799 cnic_free_id_tbl(&cp->csk_port_tbl);
3802 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3804 struct cnic_local *cp = dev->cnic_priv;
3806 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3807 GFP_KERNEL);
3808 if (!cp->csk_tbl)
3809 return -ENOMEM;
3811 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3812 CNIC_LOCAL_PORT_MIN)) {
3813 cnic_cm_free_mem(dev);
3814 return -ENOMEM;
3816 return 0;
3819 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3821 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3822 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3823 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3824 csk->state = opcode;
3827 /* 1. If event opcode matches the expected event in csk->state
3828 * 2. If the expected event is CLOSE_COMP, we accept any event
3829 * 3. If the expected event is 0, meaning the connection was never
3830 * established, we accept the opcode from cm_abort.
3831 */
3832 if (opcode == csk->state || csk->state == 0 ||
3833 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3834 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3835 if (csk->state == 0)
3836 csk->state = opcode;
3837 return 1;
3840 return 0;
3843 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3845 struct cnic_dev *dev = csk->dev;
3846 struct cnic_local *cp = dev->cnic_priv;
3848 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3849 cnic_cm_upcall(cp, csk, opcode);
3850 return;
3853 clear_bit(SK_F_CONNECT_START, &csk->flags);
3854 cnic_close_conn(csk);
3855 csk->state = opcode;
3856 cnic_cm_upcall(cp, csk, opcode);
3859 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3863 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3865 u32 seed;
3867 get_random_bytes(&seed, 4);
3868 cnic_ctx_wr(dev, 45, 0, seed);
3869 return 0;
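/* bnx2x tear-down is a ramrod chain: a close/reset completion first
 * issues SEARCHER_DELETE if the PG was offloaded, whose completion
 * issues TERMINATE_OFFLOAD; only after that chain does the ULP get
 * the close upcall.
 */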
3872 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3874 struct cnic_dev *dev = csk->dev;
3875 struct cnic_local *cp = dev->cnic_priv;
3876 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3877 union l5cm_specific_data l5_data;
3878 u32 cmd = 0;
3879 int close_complete = 0;
3881 switch (opcode) {
3882 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3883 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3884 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3885 if (cnic_ready_to_close(csk, opcode)) {
3886 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3887 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3888 else
3889 close_complete = 1;
3891 break;
3892 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3893 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3894 break;
3895 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3896 close_complete = 1;
3897 break;
3899 if (cmd) {
3900 memset(&l5_data, 0, sizeof(l5_data));
3902 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3903 &l5_data);
3904 } else if (close_complete) {
3905 ctx->timestamp = jiffies;
3906 cnic_close_conn(csk);
3907 cnic_cm_upcall(cp, csk, csk->state);
3911 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3913 struct cnic_local *cp = dev->cnic_priv;
3914 int i;
3916 if (!cp->ctx_tbl)
3917 return;
3919 if (!netif_running(dev->netdev))
3920 return;
3922 for (i = 0; i < cp->max_cid_space; i++) {
3923 struct cnic_context *ctx = &cp->ctx_tbl[i];
3925 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3926 msleep(10);
3928 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3929 netdev_warn(dev->netdev, "CID %x not deleted\n",
3930 ctx->cid);
3933 cancel_delayed_work(&cp->delete_task);
3934 flush_workqueue(cnic_wq);
3936 if (atomic_read(&cp->iscsi_conn) != 0)
3937 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3938 atomic_read(&cp->iscsi_conn));
3941 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3943 struct cnic_local *cp = dev->cnic_priv;
3944 u32 pfid = cp->pfid;
3945 u32 port = CNIC_PORT(cp);
3947 cnic_init_bnx2x_mac(dev);
3948 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3950 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3951 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3953 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3954 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3955 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3956 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3957 DEF_MAX_DA_COUNT);
3959 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3960 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3961 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3962 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3963 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3964 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3965 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3966 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3968 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
3969 DEF_MAX_CWND);
3970 return 0;
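/* Delayed work that finishes connection tear-down.  Contexts
 * flagged CTX_FL_DELETE_WAIT are destroyed once 2 seconds have
 * passed since their last ramrod; if any context is not yet due,
 * the work is re-queued after 10 ms.
 */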
3973 static void cnic_delete_task(struct work_struct *work)
3975 struct cnic_local *cp;
3976 struct cnic_dev *dev;
3977 u32 i;
3978 int need_resched = 0;
3980 cp = container_of(work, struct cnic_local, delete_task.work);
3981 dev = cp->dev;
3983 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
3984 struct drv_ctl_info info;
3986 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
3988 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
3989 cp->ethdev->drv_ctl(dev->netdev, &info);
3992 for (i = 0; i < cp->max_cid_space; i++) {
3993 struct cnic_context *ctx = &cp->ctx_tbl[i];
3995 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
3996 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3997 continue;
3999 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4000 need_resched = 1;
4001 continue;
4004 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4005 continue;
4007 cnic_bnx2x_destroy_ramrod(dev, i);
4009 cnic_free_bnx2x_conn_resc(dev, i);
4010 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4011 atomic_dec(&cp->iscsi_conn);
4013 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4016 if (need_resched)
4017 queue_delayed_work(cnic_wq, &cp->delete_task,
4018 msecs_to_jiffies(10));
4022 static int cnic_cm_open(struct cnic_dev *dev)
4024 struct cnic_local *cp = dev->cnic_priv;
4025 int err;
4027 err = cnic_cm_alloc_mem(dev);
4028 if (err)
4029 return err;
4031 err = cp->start_cm(dev);
4033 if (err)
4034 goto err_out;
4036 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4038 dev->cm_create = cnic_cm_create;
4039 dev->cm_destroy = cnic_cm_destroy;
4040 dev->cm_connect = cnic_cm_connect;
4041 dev->cm_abort = cnic_cm_abort;
4042 dev->cm_close = cnic_cm_close;
4043 dev->cm_select_dev = cnic_cm_select_dev;
4045 cp->ulp_handle[CNIC_ULP_L4] = dev;
4046 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4047 return 0;
4049 err_out:
4050 cnic_cm_free_mem(dev);
4051 return err;
4054 static int cnic_cm_shutdown(struct cnic_dev *dev)
4056 struct cnic_local *cp = dev->cnic_priv;
4057 int i;
4059 cp->stop_cm(dev);
4061 if (!cp->csk_tbl)
4062 return 0;
4064 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4065 struct cnic_sock *csk = &cp->csk_tbl[i];
4067 clear_bit(SK_F_INUSE, &csk->flags);
4068 cnic_cm_cleanup(csk);
4070 cnic_cm_free_mem(dev);
4072 return 0;
4075 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4077 u32 cid_addr;
4078 int i;
4080 cid_addr = GET_CID_ADDR(cid);
4082 for (i = 0; i < CTX_SIZE; i += 4)
4083 cnic_ctx_wr(dev, cid_addr, i, 0);
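/* On the 5709, context memory is paged through host memory.  Each
 * page table write is polled up to ten times at 5 us intervals for
 * the WRITE_REQ bit to clear; -EBUSY if it never does.
 */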
4086 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4088 struct cnic_local *cp = dev->cnic_priv;
4089 int ret = 0, i;
4090 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4092 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4093 return 0;
4095 for (i = 0; i < cp->ctx_blks; i++) {
4096 int j;
4097 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4098 u32 val;
4100 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4102 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4103 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4104 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4105 (u64) cp->ctx_arr[i].mapping >> 32);
4106 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4107 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4108 for (j = 0; j < 10; j++) {
4110 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4111 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4112 break;
4113 udelay(5);
4115 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4116 ret = -EBUSY;
4117 break;
4120 return ret;
4123 static void cnic_free_irq(struct cnic_dev *dev)
4125 struct cnic_local *cp = dev->cnic_priv;
4126 struct cnic_eth_dev *ethdev = cp->ethdev;
4128 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4129 cp->disable_int_sync(dev);
4130 tasklet_kill(&cp->cnic_irq_task);
4131 free_irq(ethdev->irq_arr[0].vector, dev);
4135 static int cnic_request_irq(struct cnic_dev *dev)
4137 struct cnic_local *cp = dev->cnic_priv;
4138 struct cnic_eth_dev *ethdev = cp->ethdev;
4139 int err;
4141 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4142 if (err)
4143 tasklet_disable(&cp->cnic_irq_task);
4145 return err;
4148 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4150 struct cnic_local *cp = dev->cnic_priv;
4151 struct cnic_eth_dev *ethdev = cp->ethdev;
4153 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4154 int err, i = 0;
4155 int sblk_num = cp->status_blk_num;
4156 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4157 BNX2_HC_SB_CONFIG_1;
4159 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4161 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4162 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4163 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4165 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4166 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4167 (unsigned long) dev);
4168 err = cnic_request_irq(dev);
4169 if (err)
4170 return err;
4172 while (cp->status_blk.bnx2->status_completion_producer_index &&
4173 i < 10) {
4174 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4175 1 << (11 + sblk_num));
4176 udelay(10);
4177 i++;
4178 barrier();
4180 if (cp->status_blk.bnx2->status_completion_producer_index) {
4181 cnic_free_irq(dev);
4182 goto failed;
4185 } else {
4186 struct status_block *sblk = cp->status_blk.gen;
4187 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4188 int i = 0;
4190 while (sblk->status_completion_producer_index && i < 10) {
4191 CNIC_WR(dev, BNX2_HC_COMMAND,
4192 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4193 udelay(10);
4194 i++;
4195 barrier();
4197 if (sblk->status_completion_producer_index)
4198 goto failed;
4201 return 0;
4203 failed:
4204 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4205 return -EBUSY;
4208 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4210 struct cnic_local *cp = dev->cnic_priv;
4211 struct cnic_eth_dev *ethdev = cp->ethdev;
4213 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4214 return;
4216 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4217 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4220 static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
4222 u32 max_conn;
4224 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
4225 dev->max_iscsi_conn = max_conn;
4228 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4230 struct cnic_local *cp = dev->cnic_priv;
4231 struct cnic_eth_dev *ethdev = cp->ethdev;
4233 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4234 return;
4236 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4237 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4238 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4239 synchronize_irq(ethdev->irq_arr[0].vector);
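/* cnic_disable_bnx2_int_sync() above is the standard disable-and-drain
 * sequence: mask the interrupt, read the register back to flush the
 * posted PCI write, then synchronize_irq() to wait for any handler
 * already running.  A minimal sketch; example_mask_and_drain() is
 * hypothetical:
 */
static inline void example_mask_and_drain(struct cnic_dev *dev, u32 reg,
					  u32 mask_val, unsigned int irq)
{
	CNIC_WR(dev, reg, mask_val);	/* mask the source */
	CNIC_RD(dev, reg);		/* flush the posted write */
	synchronize_irq(irq);		/* drain in-flight handlers */
}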
4242 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4244 struct cnic_local *cp = dev->cnic_priv;
4245 struct cnic_eth_dev *ethdev = cp->ethdev;
4246 struct cnic_uio_dev *udev = cp->udev;
4247 u32 cid_addr, tx_cid, sb_id;
4248 u32 val, offset0, offset1, offset2, offset3;
4249 int i;
4250 struct tx_bd *txbd;
4251 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4252 struct status_block *s_blk = cp->status_blk.gen;
4254 sb_id = cp->status_blk_num;
4255 tx_cid = 20;
4256 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4257 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4258 struct status_block_msix *sblk = cp->status_blk.bnx2;
4260 tx_cid = TX_TSS_CID + sb_id - 1;
4261 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4262 (TX_TSS_CID << 7));
4263 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4265 cp->tx_cons = *cp->tx_cons_ptr;
4267 cid_addr = GET_CID_ADDR(tx_cid);
4268 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4269 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4271 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4272 cnic_ctx_wr(dev, cid_addr2, i, 0);
4274 offset0 = BNX2_L2CTX_TYPE_XI;
4275 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4276 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4277 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4278 } else {
4279 cnic_init_context(dev, tx_cid);
4280 cnic_init_context(dev, tx_cid + 1);
4282 offset0 = BNX2_L2CTX_TYPE;
4283 offset1 = BNX2_L2CTX_CMD_TYPE;
4284 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4285 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4287 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4288 cnic_ctx_wr(dev, cid_addr, offset0, val);
4290 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4291 cnic_ctx_wr(dev, cid_addr, offset1, val);
4293 txbd = (struct tx_bd *) udev->l2_ring;
4295 buf_map = udev->l2_buf_map;
4296 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4297 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4298 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4300 val = (u64) ring_map >> 32;
4301 cnic_ctx_wr(dev, cid_addr, offset2, val);
4302 txbd->tx_bd_haddr_hi = val;
4304 val = (u64) ring_map & 0xffffffff;
4305 cnic_ctx_wr(dev, cid_addr, offset3, val);
4306 txbd->tx_bd_haddr_lo = val;
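/* The TX ring setup above repeatedly splits 64-bit DMA addresses into
 * hi/lo 32-bit words because the chip's buffer descriptors store the
 * address as two 32-bit fields.  A minimal sketch of the split;
 * example_dma_split() is hypothetical:
 */
static inline void example_dma_split(dma_addr_t map, u32 *hi, u32 *lo)
{
	*hi = (u64) map >> 32;		/* upper 32 bits */
	*lo = (u64) map & 0xffffffff;	/* lower 32 bits */
}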
4309 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4311 struct cnic_local *cp = dev->cnic_priv;
4312 struct cnic_eth_dev *ethdev = cp->ethdev;
4313 struct cnic_uio_dev *udev = cp->udev;
4314 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4315 int i;
4316 struct rx_bd *rxbd;
4317 struct status_block *s_blk = cp->status_blk.gen;
4318 dma_addr_t ring_map = udev->l2_ring_map;
4320 sb_id = cp->status_blk_num;
4321 cnic_init_context(dev, 2);
4322 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4323 coal_reg = BNX2_HC_COMMAND;
4324 coal_val = CNIC_RD(dev, coal_reg);
4325 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4326 struct status_block_msix *sblk = cp->status_blk.bnx2;
4328 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4329 coal_reg = BNX2_HC_COALESCE_NOW;
4330 coal_val = 1 << (11 + sb_id);
4332 i = 0;
4333 while (*cp->rx_cons_ptr == 0 && i < 10) {
4334 CNIC_WR(dev, coal_reg, coal_val);
4335 udelay(10);
4336 i++;
4337 barrier();
4339 cp->rx_cons = *cp->rx_cons_ptr;
4341 cid_addr = GET_CID_ADDR(2);
4342 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4343 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4344 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4346 if (sb_id == 0)
4347 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4348 else
4349 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4350 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4352 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
4353 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4354 dma_addr_t buf_map;
4355 int n = (i % cp->l2_rx_ring_size) + 1;
4357 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4358 rxbd->rx_bd_len = cp->l2_single_buf_size;
4359 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4360 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4361 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4363 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4364 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4365 rxbd->rx_bd_haddr_hi = val;
4367 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4368 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4369 rxbd->rx_bd_haddr_lo = val;
4371 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4372 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
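/* The RX loop above backs MAX_RX_DESC_CNT descriptors with only
 * l2_rx_ring_size real buffers: descriptor i maps to buffer
 * (i % l2_rx_ring_size) + 1, skipping slot 0, which holds the TX
 * buffer.  A minimal sketch; example_rx_buf_map() is hypothetical:
 */
static inline dma_addr_t example_rx_buf_map(struct cnic_uio_dev *udev,
					    int i, int pool, u32 buf_size)
{
	int n = (i % pool) + 1;	/* cycle through the pool, skip slot 0 */

	return udev->l2_buf_map + (n * buf_size);
}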
4375 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4377 struct kwqe *wqes[1], l2kwqe;
4379 memset(&l2kwqe, 0, sizeof(l2kwqe));
4380 wqes[0] = &l2kwqe;
4381 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4382 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4383 KWQE_OPCODE_SHIFT) | 2;
4384 dev->submit_kwqes(dev, wqes, 1);
4387 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4389 struct cnic_local *cp = dev->cnic_priv;
4390 u32 val;
4392 val = cp->func << 2;
4394 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4396 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4397 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4398 dev->mac_addr[0] = (u8) (val >> 8);
4399 dev->mac_addr[1] = (u8) val;
4401 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4403 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4404 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4405 dev->mac_addr[2] = (u8) (val >> 24);
4406 dev->mac_addr[3] = (u8) (val >> 16);
4407 dev->mac_addr[4] = (u8) (val >> 8);
4408 dev->mac_addr[5] = (u8) val;
4410 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4412 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4413 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4414 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4416 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4417 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4418 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
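/* cnic_set_bnx2_mac() above recovers the iSCSI MAC from two 32-bit
 * shared-memory words: UPPER carries bytes 0-1 in its low half and
 * LOWER carries bytes 2-5.  A minimal sketch of the unpacking;
 * example_mac_from_words() is hypothetical:
 */
static inline void example_mac_from_words(u8 *mac, u32 upper, u32 lower)
{
	mac[0] = (u8) (upper >> 8);
	mac[1] = (u8) upper;
	mac[2] = (u8) (lower >> 24);
	mac[3] = (u8) (lower >> 16);
	mac[4] = (u8) (lower >> 8);
	mac[5] = (u8) lower;
}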
4421 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4423 struct cnic_local *cp = dev->cnic_priv;
4424 struct cnic_eth_dev *ethdev = cp->ethdev;
4425 struct status_block *sblk = cp->status_blk.gen;
4426 u32 val, kcq_cid_addr, kwq_cid_addr;
4427 int err;
4429 cnic_set_bnx2_mac(dev);
4431 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4432 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4433 if (BCM_PAGE_BITS > 12)
4434 val |= (12 - 8) << 4;
4435 else
4436 val |= (BCM_PAGE_BITS - 8) << 4;
4438 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4440 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4441 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4442 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4444 err = cnic_setup_5709_context(dev, 1);
4445 if (err)
4446 return err;
4448 cnic_init_context(dev, KWQ_CID);
4449 cnic_init_context(dev, KCQ_CID);
4451 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4452 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4454 cp->max_kwq_idx = MAX_KWQ_IDX;
4455 cp->kwq_prod_idx = 0;
4456 cp->kwq_con_idx = 0;
4457 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4459 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4460 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4461 else
4462 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4464 /* Initialize the kernel work queue context. */
4465 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4466 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4467 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4469 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4470 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4472 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4473 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4475 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4476 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4478 val = (u32) cp->kwq_info.pgtbl_map;
4479 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4481 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4482 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4484 cp->kcq1.sw_prod_idx = 0;
4485 cp->kcq1.hw_prod_idx_ptr =
4486 (u16 *) &sblk->status_completion_producer_index;
4488 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4490 /* Initialize the kernel completion queue context. */
4491 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4492 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4493 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4495 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4496 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4498 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4499 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4501 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4502 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4504 val = (u32) cp->kcq1.dma.pgtbl_map;
4505 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4507 cp->int_num = 0;
4508 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4509 struct status_block_msix *msblk = cp->status_blk.bnx2;
4510 u32 sb_id = cp->status_blk_num;
4511 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4513 cp->kcq1.hw_prod_idx_ptr =
4514 (u16 *) &msblk->status_completion_producer_index;
4515 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4516 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4517 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4518 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4519 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4522 /* Enable Command Scheduler notification when we write to the
4523 * host producer index of the kernel contexts. */
4524 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4526 /* Enable Command Scheduler notification when we write to either
4527 * the Send Queue or Receive Queue producer indexes of the kernel
4528 * bypass contexts. */
4529 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4530 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4532 /* Notify COM when the driver posts an application buffer. */
4533 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4535 /* Set the CP and COM doorbells. These two processors poll the
4536 * doorbell for a non-zero value before running. This must be done
4537 * after setting up the kernel queue contexts. */
4538 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4539 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4541 cnic_init_bnx2_tx_ring(dev);
4542 cnic_init_bnx2_rx_ring(dev);
4544 err = cnic_init_bnx2_irq(dev);
4545 if (err) {
4546 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4547 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4548 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4549 return err;
4552 cnic_get_bnx2_iscsi_info(dev);
4554 return 0;
4557 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4559 struct cnic_local *cp = dev->cnic_priv;
4560 struct cnic_eth_dev *ethdev = cp->ethdev;
4561 u32 start_offset = ethdev->ctx_tbl_offset;
4562 int i;
4564 for (i = 0; i < cp->ctx_blks; i++) {
4565 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4566 dma_addr_t map = ctx->mapping;
4568 if (cp->ctx_align) {
4569 unsigned long mask = cp->ctx_align - 1;
4571 map = (map + mask) & ~mask;
4574 cnic_ctx_tbl_wr(dev, start_offset + i, map);
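/* cnic_setup_bnx2x_context() above rounds each mapping up to
 * cp->ctx_align, which works because the alignment is a power of two:
 * adding (align - 1) and masking off the low bits rounds up, e.g.
 * 0x1004 with align 0x1000 becomes 0x2000.  A minimal sketch;
 * example_align_up() is hypothetical:
 */
static inline dma_addr_t example_align_up(dma_addr_t map, u32 align)
{
	dma_addr_t mask = align - 1;	/* align must be a power of two */

	return (map + mask) & ~mask;
}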
4578 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4580 struct cnic_local *cp = dev->cnic_priv;
4581 struct cnic_eth_dev *ethdev = cp->ethdev;
4582 int err = 0;
4584 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4585 (unsigned long) dev);
4586 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4587 err = cnic_request_irq(dev);
4589 return err;
4592 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4593 u16 sb_id, u8 sb_index,
4594 u8 disable)
4597 u32 addr = BAR_CSTRORM_INTMEM +
4598 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4599 offsetof(struct hc_status_block_data_e1x, index_data) +
4600 sizeof(struct hc_index_data)*sb_index +
4601 offsetof(struct hc_index_data, flags);
4602 u16 flags = CNIC_RD16(dev, addr);
4603 /* clear and set */
4604 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4605 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4606 HC_INDEX_DATA_HC_ENABLED);
4607 CNIC_WR16(dev, addr, flags);
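/* cnic_storm_memset_hc_disable() above is a read-modify-write on a
 * one-bit field: clear the HC_ENABLED bit, then set it from the
 * inverted 'disable' argument.  A minimal sketch of the clear-and-set
 * step; example_set_field() is hypothetical:
 */
static inline u16 example_set_field(u16 flags, u16 mask, int shift,
				    u8 disable)
{
	flags &= ~mask;				 /* clear the field */
	flags |= (((~disable) << shift) & mask); /* enabled = !disable */
	return flags;
}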
4610 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4612 struct cnic_local *cp = dev->cnic_priv;
4613 u8 sb_id = cp->status_blk_num;
4615 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4616 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4617 offsetof(struct hc_status_block_data_e1x, index_data) +
4618 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4619 offsetof(struct hc_index_data, timeout), 64 / 12);
4620 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4623 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4627 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4628 struct client_init_ramrod_data *data)
4630 struct cnic_local *cp = dev->cnic_priv;
4631 struct cnic_uio_dev *udev = cp->udev;
4632 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4633 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4634 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4635 int port = CNIC_PORT(cp);
4636 int i;
4637 u32 cli = cp->ethdev->iscsi_l2_client_id;
4638 u32 val;
4640 memset(txbd, 0, BCM_PAGE_SIZE);
4642 buf_map = udev->l2_buf_map;
4643 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4644 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4645 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4647 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4648 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4649 reg_bd->addr_hi = start_bd->addr_hi;
4650 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4651 start_bd->nbytes = cpu_to_le16(0x10);
4652 start_bd->nbd = cpu_to_le16(3);
4653 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4654 start_bd->general_data = (UNICAST_ADDRESS <<
4655 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4656 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4660 val = (u64) ring_map >> 32;
4661 txbd->next_bd.addr_hi = cpu_to_le32(val);
4663 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4665 val = (u64) ring_map & 0xffffffff;
4666 txbd->next_bd.addr_lo = cpu_to_le32(val);
4668 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4670 /* Other ramrod params */
4671 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4672 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4674 /* reset xstorm per client statistics */
4675 if (cli < MAX_STAT_COUNTER_ID) {
4676 val = BAR_XSTRORM_INTMEM +
4677 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4678 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
4679 CNIC_WR(dev, val + i * 4, 0);
4682 cp->tx_cons_ptr =
4683 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4686 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4687 struct client_init_ramrod_data *data)
4689 struct cnic_local *cp = dev->cnic_priv;
4690 struct cnic_uio_dev *udev = cp->udev;
4691 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4692 BCM_PAGE_SIZE);
4693 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4694 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
4695 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4696 int i;
4697 int port = CNIC_PORT(cp);
4698 u32 cli = cp->ethdev->iscsi_l2_client_id;
4699 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4700 u32 val;
4701 dma_addr_t ring_map = udev->l2_ring_map;
4703 /* General data */
4704 data->general.client_id = cli;
4705 data->general.statistics_en_flg = 1;
4706 data->general.statistics_counter_id = cli;
4707 data->general.activate_flg = 1;
4708 data->general.sp_client_id = cli;
4710 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4711 dma_addr_t buf_map;
4712 int n = (i % cp->l2_rx_ring_size) + 1;
4714 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4715 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4716 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4719 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4720 rxbd->addr_hi = cpu_to_le32(val);
4721 data->rx.bd_page_base.hi = cpu_to_le32(val);
4723 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4724 rxbd->addr_lo = cpu_to_le32(val);
4725 data->rx.bd_page_base.lo = cpu_to_le32(val);
4727 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4728 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4729 rxcqe->addr_hi = cpu_to_le32(val);
4730 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4732 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4733 rxcqe->addr_lo = cpu_to_le32(val);
4734 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4736 /* Other ramrod params */
4737 data->rx.client_qzone_id = cl_qzone_id;
4738 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4739 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4741 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4742 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
4744 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4745 data->rx.outer_vlan_removal_enable_flg = 1;
4747 /* reset tstorm and ustorm per client statistics */
4748 if (cli < MAX_STAT_COUNTER_ID) {
4749 val = BAR_TSTRORM_INTMEM +
4750 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4751 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4752 CNIC_WR(dev, val + i * 4, 0);
4754 val = BAR_USTRORM_INTMEM +
4755 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4756 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
4757 CNIC_WR(dev, val + i * 4, 0);
4760 cp->rx_cons_ptr =
4761 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4762 cp->rx_cons = *cp->rx_cons_ptr;
4765 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4767 struct cnic_local *cp = dev->cnic_priv;
4768 u32 pfid = cp->pfid;
4770 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4771 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4772 cp->kcq1.sw_prod_idx = 0;
4774 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4775 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4777 cp->kcq1.hw_prod_idx_ptr =
4778 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4779 cp->kcq1.status_idx_ptr =
4780 &sb->sb.running_index[SM_RX_ID];
4781 } else {
4782 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4784 cp->kcq1.hw_prod_idx_ptr =
4785 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4786 cp->kcq1.status_idx_ptr =
4787 &sb->sb.running_index[SM_RX_ID];
4790 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4791 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4793 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4794 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4795 cp->kcq2.sw_prod_idx = 0;
4796 cp->kcq2.hw_prod_idx_ptr =
4797 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4798 cp->kcq2.status_idx_ptr =
4799 &sb->sb.running_index[SM_RX_ID];
4803 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4805 struct cnic_local *cp = dev->cnic_priv;
4806 struct cnic_eth_dev *ethdev = cp->ethdev;
4807 int func = CNIC_FUNC(cp), ret, i;
4808 u32 pfid;
4810 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4811 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4813 if (!(val & 1))
4814 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4815 else
4816 val = (val >> 1) & 1;
4818 if (val)
4819 cp->pfid = func >> 1;
4820 else
4821 cp->pfid = func & 0x6;
4822 } else {
4823 cp->pfid = func;
4825 pfid = cp->pfid;
4827 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4828 cp->iscsi_start_cid);
4830 if (ret)
4831 return -ENOMEM;
4833 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4834 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
4835 BNX2X_FCOE_NUM_CONNECTIONS,
4836 cp->fcoe_start_cid);
4838 if (ret)
4839 return -ENOMEM;
4842 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4844 cnic_init_bnx2x_kcq(dev);
4846 /* Only 1 EQ */
4847 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4848 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4849 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4850 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4851 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4852 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4853 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4854 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4855 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4856 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4857 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4858 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4859 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4860 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4861 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4862 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4863 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4864 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4865 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4866 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4867 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4868 HC_INDEX_ISCSI_EQ_CONS);
4870 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4871 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4872 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
4873 cp->conn_buf_info.pgtbl[2 * i]);
4874 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4875 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
4876 cp->conn_buf_info.pgtbl[(2 * i) + 1]);
4879 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4880 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4881 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4882 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4883 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4884 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4886 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4887 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4889 cnic_setup_bnx2x_context(dev);
4891 ret = cnic_init_bnx2x_irq(dev);
4892 if (ret)
4893 return ret;
4895 return 0;
4898 static void cnic_init_rings(struct cnic_dev *dev)
4900 struct cnic_local *cp = dev->cnic_priv;
4901 struct cnic_uio_dev *udev = cp->udev;
4903 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4904 return;
4906 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4907 cnic_init_bnx2_tx_ring(dev);
4908 cnic_init_bnx2_rx_ring(dev);
4909 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4910 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4911 u32 cli = cp->ethdev->iscsi_l2_client_id;
4912 u32 cid = cp->ethdev->iscsi_l2_cid;
4913 u32 cl_qzone_id;
4914 struct client_init_ramrod_data *data;
4915 union l5cm_specific_data l5_data;
4916 struct ustorm_eth_rx_producers rx_prods = {0};
4917 u32 off, i;
4919 rx_prods.bd_prod = 0;
4920 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4921 barrier();
4923 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4925 off = BAR_USTRORM_INTMEM +
4926 (BNX2X_CHIP_IS_E2(cp->chip_id) ?
4927 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4928 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4930 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4931 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4933 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4935 data = udev->l2_buf;
4937 memset(data, 0, sizeof(*data));
4939 cnic_init_bnx2x_tx_ring(dev, data);
4940 cnic_init_bnx2x_rx_ring(dev, data);
4942 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4943 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4945 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4947 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4948 cid, ETH_CONNECTION_TYPE, &l5_data);
4950 i = 0;
4951 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4952 ++i < 10)
4953 msleep(1);
4955 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4956 netdev_err(dev->netdev,
4957 "iSCSI CLIENT_SETUP did not complete\n");
4958 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4959 cnic_ring_ctl(dev, cid, cli, 1);
4963 static void cnic_shutdown_rings(struct cnic_dev *dev)
4965 struct cnic_local *cp = dev->cnic_priv;
4967 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4968 return;
4970 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4971 cnic_shutdown_bnx2_rx_ring(dev);
4972 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4973 struct cnic_local *cp = dev->cnic_priv;
4974 u32 cli = cp->ethdev->iscsi_l2_client_id;
4975 u32 cid = cp->ethdev->iscsi_l2_cid;
4976 union l5cm_specific_data l5_data;
4977 int i;
4979 cnic_ring_ctl(dev, cid, cli, 0);
4981 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4983 l5_data.phy_address.lo = cli;
4984 l5_data.phy_address.hi = 0;
4985 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4986 cid, ETH_CONNECTION_TYPE, &l5_data);
4987 i = 0;
4988 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4989 ++i < 10)
4990 msleep(1);
4992 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4993 netdev_err(dev->netdev,
4994 "iSCSI CLIENT_HALT did not complete\n");
4995 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4997 memset(&l5_data, 0, sizeof(l5_data));
4998 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
4999 cid, NONE_CONNECTION_TYPE, &l5_data);
5000 msleep(10);
5002 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
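/* Both cnic_init_rings() and cnic_shutdown_rings() above wait for
 * ramrod completion the same way: set the L2_WAIT flag, post the
 * request, then poll the flag for up to ~10 ms while the completion
 * handler clears it.  A minimal sketch; example_wait_bit_cleared() is
 * hypothetical:
 */
static inline int example_wait_bit_cleared(unsigned long *flags, int bit)
{
	int i = 0;

	while (test_bit(bit, flags) && ++i < 10)
		msleep(1);	/* completion path clears the bit */

	return test_bit(bit, flags) ? -EBUSY : 0;
}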
5005 static int cnic_register_netdev(struct cnic_dev *dev)
5007 struct cnic_local *cp = dev->cnic_priv;
5008 struct cnic_eth_dev *ethdev = cp->ethdev;
5009 int err;
5011 if (!ethdev)
5012 return -ENODEV;
5014 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5015 return 0;
5017 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5018 if (err)
5019 netdev_err(dev->netdev, "register_cnic failed\n");
5021 return err;
5024 static void cnic_unregister_netdev(struct cnic_dev *dev)
5026 struct cnic_local *cp = dev->cnic_priv;
5027 struct cnic_eth_dev *ethdev = cp->ethdev;
5029 if (!ethdev)
5030 return;
5032 ethdev->drv_unregister_cnic(dev->netdev);
5035 static int cnic_start_hw(struct cnic_dev *dev)
5037 struct cnic_local *cp = dev->cnic_priv;
5038 struct cnic_eth_dev *ethdev = cp->ethdev;
5039 int err;
5041 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5042 return -EALREADY;
5044 dev->regview = ethdev->io_base;
5045 pci_dev_get(dev->pcidev);
5046 cp->func = PCI_FUNC(dev->pcidev->devfn);
5047 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5048 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5050 err = cp->alloc_resc(dev);
5051 if (err) {
5052 netdev_err(dev->netdev, "resource allocation failed\n");
5053 goto err1;
5056 err = cp->start_hw(dev);
5057 if (err)
5058 goto err1;
5060 err = cnic_cm_open(dev);
5061 if (err)
5062 goto err1;
5064 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5066 cp->enable_int(dev);
5068 return 0;
5070 err1:
5071 cp->free_resc(dev);
5072 pci_dev_put(dev->pcidev);
5073 return err;
5076 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5078 cnic_disable_bnx2_int_sync(dev);
5080 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5081 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5083 cnic_init_context(dev, KWQ_CID);
5084 cnic_init_context(dev, KCQ_CID);
5086 cnic_setup_5709_context(dev, 0);
5087 cnic_free_irq(dev);
5089 cnic_free_resc(dev);
5093 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5095 struct cnic_local *cp = dev->cnic_priv;
5097 cnic_free_irq(dev);
5098 *cp->kcq1.hw_prod_idx_ptr = 0;
5099 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5100 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5101 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5102 cnic_free_resc(dev);
5105 static void cnic_stop_hw(struct cnic_dev *dev)
5107 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5108 struct cnic_local *cp = dev->cnic_priv;
5109 int i = 0;
5111 /* Need to wait for the ring shutdown event to complete
5112 * before clearing the CNIC_UP flag. */
5114 while (cp->udev->uio_dev != -1 && i < 15) {
5115 msleep(100);
5116 i++;
5118 cnic_shutdown_rings(dev);
5119 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5120 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
5121 synchronize_rcu();
5122 cnic_cm_shutdown(dev);
5123 cp->stop_hw(dev);
5124 pci_dev_put(dev->pcidev);
5128 static void cnic_free_dev(struct cnic_dev *dev)
5130 int i = 0;
5132 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5133 msleep(100);
5134 i++;
5136 if (atomic_read(&dev->ref_count) != 0)
5137 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5139 netdev_info(dev->netdev, "Removed CNIC device\n");
5140 dev_put(dev->netdev);
5141 kfree(dev);
5144 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5145 struct pci_dev *pdev)
5147 struct cnic_dev *cdev;
5148 struct cnic_local *cp;
5149 int alloc_size;
5151 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5153 cdev = kzalloc(alloc_size, GFP_KERNEL);
5154 if (cdev == NULL) {
5155 netdev_err(dev, "failed to allocate cnic dev struct\n");
5156 return NULL;
5159 cdev->netdev = dev;
5160 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5161 cdev->register_device = cnic_register_device;
5162 cdev->unregister_device = cnic_unregister_device;
5163 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5165 cp = cdev->cnic_priv;
5166 cp->dev = cdev;
5167 cp->l2_single_buf_size = 0x400;
5168 cp->l2_rx_ring_size = 3;
5170 spin_lock_init(&cp->cnic_ulp_lock);
5172 netdev_info(dev, "Added CNIC device\n");
5174 return cdev;
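/* cnic_alloc_dev() above carves both structures out of a single
 * kzalloc() so that the private data sits immediately after the
 * public struct, and cnic_priv simply points past it.  A minimal
 * sketch of the layout trick; the example_* types are hypothetical:
 */
struct example_pub {
	void *priv;
};

struct example_priv {
	int state;
};

static inline struct example_pub *example_alloc(void)
{
	struct example_pub *pub;

	pub = kzalloc(sizeof(struct example_pub) +
		      sizeof(struct example_priv), GFP_KERNEL);
	if (pub)
		pub->priv = (char *)pub + sizeof(struct example_pub);
	return pub;
}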
5177 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5179 struct pci_dev *pdev;
5180 struct cnic_dev *cdev;
5181 struct cnic_local *cp;
5182 struct cnic_eth_dev *ethdev = NULL;
5183 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5185 probe = symbol_get(bnx2_cnic_probe);
5186 if (probe) {
5187 ethdev = (*probe)(dev);
5188 symbol_put(bnx2_cnic_probe);
5190 if (!ethdev)
5191 return NULL;
5193 pdev = ethdev->pdev;
5194 if (!pdev)
5195 return NULL;
5197 dev_hold(dev);
5198 pci_dev_get(pdev);
5199 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5200 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5201 (pdev->revision < 0x10)) {
5202 pci_dev_put(pdev);
5203 goto cnic_err;
5205 pci_dev_put(pdev);
5207 cdev = cnic_alloc_dev(dev, pdev);
5208 if (cdev == NULL)
5209 goto cnic_err;
5211 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5212 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5214 cp = cdev->cnic_priv;
5215 cp->ethdev = ethdev;
5216 cdev->pcidev = pdev;
5217 cp->chip_id = ethdev->chip_id;
5219 cp->cnic_ops = &cnic_bnx2_ops;
5220 cp->start_hw = cnic_start_bnx2_hw;
5221 cp->stop_hw = cnic_stop_bnx2_hw;
5222 cp->setup_pgtbl = cnic_setup_page_tbl;
5223 cp->alloc_resc = cnic_alloc_bnx2_resc;
5224 cp->free_resc = cnic_free_resc;
5225 cp->start_cm = cnic_cm_init_bnx2_hw;
5226 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5227 cp->enable_int = cnic_enable_bnx2_int;
5228 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5229 cp->close_conn = cnic_close_bnx2_conn;
5230 cp->next_idx = cnic_bnx2_next_idx;
5231 cp->hw_idx = cnic_bnx2_hw_idx;
5232 return cdev;
5234 cnic_err:
5235 dev_put(dev);
5236 return NULL;
5239 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5241 struct pci_dev *pdev;
5242 struct cnic_dev *cdev;
5243 struct cnic_local *cp;
5244 struct cnic_eth_dev *ethdev = NULL;
5245 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5247 probe = symbol_get(bnx2x_cnic_probe);
5248 if (probe) {
5249 ethdev = (*probe)(dev);
5250 symbol_put(bnx2x_cnic_probe);
5252 if (!ethdev)
5253 return NULL;
5255 pdev = ethdev->pdev;
5256 if (!pdev)
5257 return NULL;
5259 dev_hold(dev);
5260 cdev = cnic_alloc_dev(dev, pdev);
5261 if (cdev == NULL) {
5262 dev_put(dev);
5263 return NULL;
5266 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5267 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5269 cp = cdev->cnic_priv;
5270 cp->ethdev = ethdev;
5271 cdev->pcidev = pdev;
5272 cp->chip_id = ethdev->chip_id;
5274 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5275 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5276 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5277 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5278 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5280 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5282 cp->cnic_ops = &cnic_bnx2x_ops;
5283 cp->start_hw = cnic_start_bnx2x_hw;
5284 cp->stop_hw = cnic_stop_bnx2x_hw;
5285 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5286 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5287 cp->free_resc = cnic_free_resc;
5288 cp->start_cm = cnic_cm_init_bnx2x_hw;
5289 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5290 cp->enable_int = cnic_enable_bnx2x_int;
5291 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5292 if (BNX2X_CHIP_IS_E2(cp->chip_id))
5293 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5294 else
5295 cp->ack_int = cnic_ack_bnx2x_msix;
5296 cp->close_conn = cnic_close_bnx2x_conn;
5297 cp->next_idx = cnic_bnx2x_next_idx;
5298 cp->hw_idx = cnic_bnx2x_hw_idx;
5299 return cdev;
5302 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5304 struct ethtool_drvinfo drvinfo;
5305 struct cnic_dev *cdev = NULL;
5307 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5308 memset(&drvinfo, 0, sizeof(drvinfo));
5309 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5311 if (!strcmp(drvinfo.driver, "bnx2"))
5312 cdev = init_bnx2_cnic(dev);
5313 if (!strcmp(drvinfo.driver, "bnx2x"))
5314 cdev = init_bnx2x_cnic(dev);
5315 if (cdev) {
5316 write_lock(&cnic_dev_lock);
5317 list_add(&cdev->list, &cnic_dev_list);
5318 write_unlock(&cnic_dev_lock);
5321 return cdev;
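/* is_cnic_dev() above identifies supported NICs by the driver name
 * reported through ethtool_ops rather than by PCI id, so any bnx2 or
 * bnx2x netdev matches regardless of device id.  A minimal sketch of
 * the probe-by-driver-name idiom; example_driver_is() is hypothetical:
 */
static inline int example_driver_is(struct net_device *dev,
				    const char *name)
{
	struct ethtool_drvinfo drvinfo;

	if (!dev->ethtool_ops || !dev->ethtool_ops->get_drvinfo)
		return 0;
	memset(&drvinfo, 0, sizeof(drvinfo));
	dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
	return strcmp(drvinfo.driver, name) == 0;
}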
5325 /* netdev event handler */
5327 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5328 void *ptr)
5330 struct net_device *netdev = ptr;
5331 struct cnic_dev *dev;
5332 int if_type;
5333 int new_dev = 0;
5335 dev = cnic_from_netdev(netdev);
5337 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
5338 /* Check for the hot-plug device */
5339 dev = is_cnic_dev(netdev);
5340 if (dev) {
5341 new_dev = 1;
5342 cnic_hold(dev);
5345 if (dev) {
5346 struct cnic_local *cp = dev->cnic_priv;
5348 if (new_dev)
5349 cnic_ulp_init(dev);
5350 else if (event == NETDEV_UNREGISTER)
5351 cnic_ulp_exit(dev);
5353 if (event == NETDEV_UP) {
5354 if (cnic_register_netdev(dev) != 0) {
5355 cnic_put(dev);
5356 goto done;
5358 if (!cnic_start_hw(dev))
5359 cnic_ulp_start(dev);
5362 rcu_read_lock();
5363 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5364 struct cnic_ulp_ops *ulp_ops;
5365 void *ctx;
5367 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5368 if (!ulp_ops || !ulp_ops->indicate_netevent)
5369 continue;
5371 ctx = cp->ulp_handle[if_type];
5373 ulp_ops->indicate_netevent(ctx, event);
5375 rcu_read_unlock();
5377 if (event == NETDEV_GOING_DOWN) {
5378 cnic_ulp_stop(dev);
5379 cnic_stop_hw(dev);
5380 cnic_unregister_netdev(dev);
5381 } else if (event == NETDEV_UNREGISTER) {
5382 write_lock(&cnic_dev_lock);
5383 list_del_init(&dev->list);
5384 write_unlock(&cnic_dev_lock);
5386 cnic_put(dev);
5387 cnic_free_dev(dev);
5388 goto done;
5390 cnic_put(dev);
5392 done:
5393 return NOTIFY_DONE;
5396 static struct notifier_block cnic_netdev_notifier = {
5397 .notifier_call = cnic_netdev_event
5400 static void cnic_release(void)
5402 struct cnic_dev *dev;
5403 struct cnic_uio_dev *udev;
5405 while (!list_empty(&cnic_dev_list)) {
5406 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5407 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5408 cnic_ulp_stop(dev);
5409 cnic_stop_hw(dev);
5412 cnic_ulp_exit(dev);
5413 cnic_unregister_netdev(dev);
5414 list_del_init(&dev->list);
5415 cnic_free_dev(dev);
5417 while (!list_empty(&cnic_udev_list)) {
5418 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5419 list);
5420 cnic_free_uio(udev);
5424 static int __init cnic_init(void)
5426 int rc = 0;
5428 pr_info("%s", version);
5430 rc = register_netdevice_notifier(&cnic_netdev_notifier);
5431 if (rc) {
5432 cnic_release();
5433 return rc;
5436 cnic_wq = create_singlethread_workqueue("cnic_wq");
5437 if (!cnic_wq) {
5438 cnic_release();
5439 unregister_netdevice_notifier(&cnic_netdev_notifier);
5440 return -ENOMEM;
5443 return 0;
5446 static void __exit cnic_exit(void)
5448 unregister_netdevice_notifier(&cnic_netdev_notifier);
5449 cnic_release();
5450 destroy_workqueue(cnic_wq);
5453 module_init(cnic_init);
5454 module_exit(cnic_exit);