/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <scsi/iscsi_if.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>

#include "qedi.h"
#include "qedi_gbl.h"
#include "qedi_iscsi.h"
static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");

uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
module_param(qedi_dbg_log, uint, 0644);
MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");

uint qedi_io_tracing;
module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
    " Enable logging of SCSI requests/completions into trace buffer. (default off).");
const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
static LIST_HEAD(qedi_udev_list);
/* Static function declaration */
static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
static void qedi_free_global_queues(struct qedi_ctx *qedi);
static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
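
/*
 * Callback invoked by the qed core for firmware asynchronous events.
 * Looks up the endpoint for the connection ID carried in the event and
 * dispatches on the event code: offload-connect and terminate completions
 * wake the TCP offload waiter, while iSCSI/TCP error events are handed to
 * the protocol error handlers.
 */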
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
    struct qedi_ctx *qedi;
    struct qedi_endpoint *qedi_ep;
    struct async_data *data;
    int rval = 0;

    if (!context || !fw_handle) {
        QEDI_ERR(NULL, "Recv event with ctx NULL\n");
        return -EINVAL;
    }

    qedi = (struct qedi_ctx *)context;
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
              "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

    data = (struct async_data *)fw_handle;
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
              "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
              data->cid, data->itid, data->error_code,
              data->fw_debug_param);

    qedi_ep = qedi->ep_tbl[data->cid];

    if (!qedi_ep) {
        QEDI_WARN(&qedi->dbg_ctx,
                  "Cannot process event, ep already disconnected, cid=0x%x\n",
                  data->cid);
        WARN_ON(1);
        return -ENODEV;
    }

    switch (fw_event_code) {
    case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
        if (qedi_ep->state == EP_STATE_OFLDCONN_START)
            qedi_ep->state = EP_STATE_OFLDCONN_COMPL;

        wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
        break;
    case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
        qedi_ep->state = EP_STATE_DISCONN_COMPL;
        wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
        break;
    case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
        qedi_process_iscsi_error(qedi_ep, data);
        break;
    case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
    case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
    case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
    case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
    case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
    case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
    case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
        qedi_process_tcp_error(qedi_ep, data);
        break;
    default:
        QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
                 fw_event_code);
    }

    return rval;
}
static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
{
    struct qedi_uio_dev *udev = uinfo->priv;
    struct qedi_ctx *qedi = udev->qedi;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    if (udev->uio_dev != -1)
        return -EBUSY;

    udev->uio_dev = iminor(inode);
    qedi_reset_uio_rings(udev);
    set_bit(UIO_DEV_OPENED, &qedi->flags);

    return 0;
}

static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
{
    struct qedi_uio_dev *udev = uinfo->priv;
    struct qedi_ctx *qedi = udev->qedi;

    udev->uio_dev = -1;
    clear_bit(UIO_DEV_OPENED, &qedi->flags);
    qedi_ll2_free_skbs(qedi);
    return 0;
}

static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
{
    if (udev->ll2_ring) {
        free_page((unsigned long)udev->ll2_ring);
        udev->ll2_ring = NULL;
    }

    if (udev->ll2_buf) {
        free_pages((unsigned long)udev->ll2_buf, 2);
        udev->ll2_buf = NULL;
    }
}

static void __qedi_free_uio(struct qedi_uio_dev *udev)
{
    uio_unregister_device(&udev->qedi_uinfo);

    __qedi_free_uio_rings(udev);

    pci_dev_put(udev->pdev);
    kfree(udev->uctrl);
    kfree(udev);
}

static void qedi_free_uio(struct qedi_uio_dev *udev)
{
    if (!udev)
        return;

    list_del_init(&udev->list);
    __qedi_free_uio(udev);
}
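
/*
 * Reset the LL2 ring and buffer state shared with the iscsiuio daemon.
 * Called when the UIO device is freshly opened so that the hardware and
 * host producer/consumer indices start from a clean slate.
 */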
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
{
    struct qedi_ctx *qedi = NULL;
    struct qedi_uio_ctrl *uctrl = NULL;

    qedi = udev->qedi;
    uctrl = udev->uctrl;

    spin_lock_bh(&qedi->ll2_lock);
    uctrl->host_rx_cons = 0;
    uctrl->hw_rx_prod = 0;
    uctrl->hw_rx_bd_prod = 0;
    uctrl->host_rx_bd_cons = 0;

    memset(udev->ll2_ring, 0, udev->ll2_ring_size);
    memset(udev->ll2_buf, 0, udev->ll2_buf_size);
    spin_unlock_bh(&qedi->ll2_lock);
}
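
/*
 * Allocate the page-sized LL2 ring and the Tx/Rx packet buffer area that
 * are later exposed to userspace through the UIO memory maps.
 */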
static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
{
    int rc = 0;

    if (udev->ll2_ring || udev->ll2_buf)
        return rc;

    /* Allocating memory for LL2 ring */
    udev->ll2_ring_size = QEDI_PAGE_SIZE;
    udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
    if (!udev->ll2_ring) {
        rc = -ENOMEM;
        goto exit_alloc_ring;
    }

    /* Allocating memory for Tx/Rx pkt buffer */
    udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
    udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
    udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
                                             __GFP_ZERO, 2);
    if (!udev->ll2_buf) {
        rc = -ENOMEM;
        goto exit_alloc_buf;
    }
    return rc;

exit_alloc_buf:
    free_page((unsigned long)udev->ll2_ring);
    udev->ll2_ring = NULL;
exit_alloc_ring:
    return rc;
}

static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
{
    struct qedi_uio_dev *udev = NULL;
    struct qedi_uio_ctrl *uctrl = NULL;
    int rc = 0;

    list_for_each_entry(udev, &qedi_udev_list, list) {
        if (udev->pdev == qedi->pdev) {
            qedi->udev = udev;
            if (__qedi_alloc_uio_rings(udev)) {
                udev->qedi = NULL;
                return -ENOMEM;
            }
            udev->qedi = qedi;

            return 0;
        }
    }

    udev = kzalloc(sizeof(*udev), GFP_KERNEL);
    if (!udev) {
        rc = -ENOMEM;
        goto err_udev;
    }

    uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
    if (!uctrl) {
        rc = -ENOMEM;
        goto err_uctrl;
    }

    udev->uio_dev = -1;

    udev->qedi = qedi;
    udev->pdev = qedi->pdev;
    udev->uctrl = uctrl;

    rc = __qedi_alloc_uio_rings(udev);
    if (rc)
        goto err_uio_rings;

    list_add(&udev->list, &qedi_udev_list);

    pci_dev_get(udev->pdev);
    qedi->udev = udev;

    udev->tx_pkt = udev->ll2_buf;
    udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
    return 0;

err_uio_rings:
    kfree(uctrl);
err_uctrl:
    kfree(udev);
err_udev:
    return rc;
}
static int qedi_init_uio(struct qedi_ctx *qedi)
{
    struct qedi_uio_dev *udev = qedi->udev;
    struct uio_info *uinfo;
    int ret = 0;

    if (!udev)
        return -ENOMEM;

    uinfo = &udev->qedi_uinfo;

    uinfo->mem[0].addr = (unsigned long)udev->uctrl;
    uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
    uinfo->mem[0].memtype = UIO_MEM_LOGICAL;

    uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
    uinfo->mem[1].size = udev->ll2_ring_size;
    uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

    uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
    uinfo->mem[2].size = udev->ll2_buf_size;
    uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

    uinfo->name = "qedi_uio";
    uinfo->version = QEDI_MODULE_VERSION;
    uinfo->irq = UIO_IRQ_CUSTOM;

    uinfo->open = qedi_uio_open;
    uinfo->release = qedi_uio_close;

    if (udev->uio_dev == -1) {
        if (!uinfo->priv) {
            uinfo->priv = udev;

            ret = uio_register_device(&udev->pdev->dev, uinfo);
            if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "UIO registration failed\n");
        }
    }

    return ret;
}
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
                                  struct qed_sb_info *sb_info, u16 sb_id)
{
    struct status_block *sb_virt;
    dma_addr_t sb_phys;
    int ret;

    sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
                                 sizeof(struct status_block), &sb_phys,
                                 GFP_KERNEL);
    if (!sb_virt) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "Status block allocation failed for id = %d.\n",
                 sb_id);
        return -ENOMEM;
    }

    ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
                                    sb_id, QED_SB_TYPE_STORAGE);
    if (ret) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "Status block initialization failed for id = %d.\n",
                 sb_id);
        return ret;
    }

    return 0;
}

static void qedi_free_sb(struct qedi_ctx *qedi)
{
    struct qed_sb_info *sb_info;
    int id;

    for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
        sb_info = &qedi->sb_array[id];
        if (sb_info->sb_virt)
            dma_free_coherent(&qedi->pdev->dev,
                              sizeof(*sb_info->sb_virt),
                              (void *)sb_info->sb_virt,
                              sb_info->sb_phys);
    }
}

static void qedi_free_fp(struct qedi_ctx *qedi)
{
    kfree(qedi->fp_array);
    kfree(qedi->sb_array);
}

static void qedi_destroy_fp(struct qedi_ctx *qedi)
{
    qedi_free_sb(qedi);
    qedi_free_fp(qedi);
}
static int qedi_alloc_fp(struct qedi_ctx *qedi)
{
    int ret = 0;

    qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
                             sizeof(struct qedi_fastpath), GFP_KERNEL);
    if (!qedi->fp_array) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "fastpath fp array allocation failed.\n");
        return -ENOMEM;
    }

    qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
                             sizeof(struct qed_sb_info), GFP_KERNEL);
    if (!qedi->sb_array) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "fastpath sb array allocation failed.\n");
        ret = -ENOMEM;
        goto free_fp;
    }

    return ret;

free_fp:
    qedi_free_fp(qedi);
    return ret;
}

static void qedi_int_fp(struct qedi_ctx *qedi)
{
    struct qedi_fastpath *fp;
    int id;

    memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
           sizeof(*qedi->fp_array));
    memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
           sizeof(*qedi->sb_array));

    for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
        fp = &qedi->fp_array[id];
        fp->sb_info = &qedi->sb_array[id];
        fp->sb_id = id;
        fp->qedi = qedi;
        snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                 "qedi", id);

        /* fp_array[i] ---- irq cookie
         * So init data which is needed in int ctx
         */
    }
}

static int qedi_prepare_fp(struct qedi_ctx *qedi)
{
    struct qedi_fastpath *fp;
    int id, ret = 0;

    ret = qedi_alloc_fp(qedi);
    if (ret)
        goto err;

    qedi_int_fp(qedi);

    for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
        fp = &qedi->fp_array[id];
        ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
        if (ret) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "SB allocation and initialization failed.\n");
            ret = -EIO;
            goto err_init;
        }
    }

    return 0;

err_init:
    qedi_free_sb(qedi);
    qedi_free_fp(qedi);
err:
    return ret;
}
static int qedi_setup_cid_que(struct qedi_ctx *qedi)
{
    int i;

    qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
                                               sizeof(u32), GFP_KERNEL);
    if (!qedi->cid_que.cid_que_base)
        return -ENOMEM;

    qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
                                               sizeof(struct qedi_conn *),
                                               GFP_KERNEL);
    if (!qedi->cid_que.conn_cid_tbl) {
        kfree(qedi->cid_que.cid_que_base);
        qedi->cid_que.cid_que_base = NULL;
        return -ENOMEM;
    }

    qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
    qedi->cid_que.cid_q_prod_idx = 0;
    qedi->cid_que.cid_q_cons_idx = 0;
    qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
    qedi->cid_que.cid_free_cnt = qedi->max_active_conns;

    for (i = 0; i < qedi->max_active_conns; i++) {
        qedi->cid_que.cid_que[i] = i;
        qedi->cid_que.conn_cid_tbl[i] = NULL;
    }

    return 0;
}

static void qedi_release_cid_que(struct qedi_ctx *qedi)
{
    kfree(qedi->cid_que.cid_que_base);
    qedi->cid_que.cid_que_base = NULL;

    kfree(qedi->cid_que.conn_cid_tbl);
    qedi->cid_que.conn_cid_tbl = NULL;
}
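
/*
 * Local TCP port ID table: a simple bitmap allocator used to hand out
 * source ports for offloaded connections.
 */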
static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
                            u16 start_id, u16 next)
{
    id_tbl->start = start_id;
    id_tbl->max = size;
    id_tbl->next = next;
    spin_lock_init(&id_tbl->lock);
    id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
    if (!id_tbl->table)
        return -ENOMEM;

    return 0;
}

static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
{
    kfree(id_tbl->table);
    id_tbl->table = NULL;
}

int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
    int ret = -1;

    id -= id_tbl->start;
    if (id >= id_tbl->max)
        return ret;

    spin_lock(&id_tbl->lock);
    if (!test_bit(id, id_tbl->table)) {
        set_bit(id, id_tbl->table);
        ret = 0;
    }
    spin_unlock(&id_tbl->lock);
    return ret;
}

u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
{
    u16 id;

    spin_lock(&id_tbl->lock);
    id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
    if (id >= id_tbl->max) {
        id = QEDI_LOCAL_PORT_INVALID;
        if (id_tbl->next != 0) {
            id = find_first_zero_bit(id_tbl->table, id_tbl->next);
            if (id >= id_tbl->next)
                id = QEDI_LOCAL_PORT_INVALID;
        }
    }

    if (id < id_tbl->max) {
        set_bit(id, id_tbl->table);
        id_tbl->next = (id + 1) & (id_tbl->max - 1);
        id += id_tbl->start;
    }

    spin_unlock(&id_tbl->lock);

    return id;
}

void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
    if (id == QEDI_LOCAL_PORT_INVALID)
        return;

    id -= id_tbl->start;
    if (id >= id_tbl->max)
        return;

    clear_bit(id, id_tbl->table);
}

static void qedi_cm_free_mem(struct qedi_ctx *qedi)
{
    kfree(qedi->ep_tbl);
    qedi->ep_tbl = NULL;
    qedi_free_id_tbl(&qedi->lcl_port_tbl);
}

static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
{
    u16 port_id;

    qedi->ep_tbl = kzalloc((qedi->max_active_conns *
                            sizeof(struct qedi_endpoint *)), GFP_KERNEL);
    if (!qedi->ep_tbl)
        return -ENOMEM;

    port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
    if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
                         QEDI_LOCAL_PORT_MIN, port_id)) {
        qedi_cm_free_mem(qedi);
        return -ENOMEM;
    }

    return 0;
}
static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
{
    struct Scsi_Host *shost;
    struct qedi_ctx *qedi = NULL;

    shost = iscsi_host_alloc(&qedi_host_template,
                             sizeof(struct qedi_ctx), 0);
    if (!shost) {
        QEDI_ERR(NULL, "Could not allocate shost\n");
        goto exit_setup_shost;
    }

    shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
    shost->max_channel = 0;
    shost->max_lun = ~0;
    shost->max_cmd_len = 16;
    shost->transportt = qedi_scsi_transport;

    qedi = iscsi_host_priv(shost);
    memset(qedi, 0, sizeof(*qedi));
    qedi->shost = shost;
    qedi->dbg_ctx.host_no = shost->host_no;
    qedi->pdev = pdev;
    qedi->dbg_ctx.pdev = pdev;
    qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
    qedi->max_sqes = QEDI_SQ_SIZE;

    if (shost_use_blk_mq(shost))
        shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);

    pci_set_drvdata(pdev, qedi);

exit_setup_shost:
    return qedi;
}
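
/*
 * LL2 receive callback: queue the skb on the driver's skb work list and
 * wake the receive thread, which later copies it into the iscsiuio
 * mmapped region. Runs in a context where only GFP_ATOMIC is safe.
 */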
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
    struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
    struct qedi_uio_dev *udev;
    struct qedi_uio_ctrl *uctrl;
    struct skb_work_list *work;
    u32 prod;

    if (!qedi) {
        QEDI_ERR(NULL, "qedi is NULL\n");
        return -1;
    }

    if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
                  "UIO DEV is not opened\n");
        kfree_skb(skb);
        return 0;
    }

    udev = qedi->udev;
    uctrl = udev->uctrl;

    work = kzalloc(sizeof(*work), GFP_ATOMIC);
    if (!work) {
        QEDI_WARN(&qedi->dbg_ctx,
                  "Could not allocate work so dropping frame.\n");
        kfree_skb(skb);
        return 0;
    }

    INIT_LIST_HEAD(&work->list);
    work->skb = skb;

    if (skb_vlan_tag_present(skb))
        work->vlan_id = skb_vlan_tag_get(skb);

    if (work->vlan_id)
        __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);

    spin_lock_bh(&qedi->ll2_lock);
    list_add_tail(&work->list, &qedi->ll2_skb_list);

    ++uctrl->hw_rx_prod_cnt;
    prod = (uctrl->hw_rx_prod + 1) % RX_RING;
    if (prod != uctrl->host_rx_cons) {
        uctrl->hw_rx_prod = prod;
        spin_unlock_bh(&qedi->ll2_lock);
        wake_up_process(qedi->ll2_recv_thread);
        return 0;
    }

    spin_unlock_bh(&qedi->ll2_lock);
    return 0;
}
/* map this skb to iscsiuio mmaped region */
static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
                                u16 vlan_id)
{
    struct qedi_uio_dev *udev = NULL;
    struct qedi_uio_ctrl *uctrl = NULL;
    struct qedi_rx_bd rxbd;
    struct qedi_rx_bd *p_rxbd;
    u32 rx_bd_prod;
    void *pkt;
    int len = 0;

    if (!qedi) {
        QEDI_ERR(NULL, "qedi is NULL\n");
        return -1;
    }

    udev = qedi->udev;
    uctrl = udev->uctrl;
    pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
    len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
    memcpy(pkt, skb->data, len);

    memset(&rxbd, 0, sizeof(rxbd));
    rxbd.rx_pkt_index = uctrl->hw_rx_prod;
    rxbd.rx_pkt_len = len;
    rxbd.vlan_id = vlan_id;

    uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
    rx_bd_prod = uctrl->hw_rx_bd_prod;
    p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
    p_rxbd += rx_bd_prod;

    memcpy(p_rxbd, &rxbd, sizeof(rxbd));

    /* notify the iscsiuio about new packet */
    uio_event_notify(&udev->qedi_uinfo);

    return 0;
}
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
{
    struct skb_work_list *work, *work_tmp;

    spin_lock_bh(&qedi->ll2_lock);
    list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
        list_del(&work->list);
        if (work->skb)
            kfree_skb(work->skb);
        kfree(work);
    }
    spin_unlock_bh(&qedi->ll2_lock);
}

static int qedi_ll2_recv_thread(void *arg)
{
    struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
    struct skb_work_list *work, *work_tmp;

    set_user_nice(current, -20);

    while (!kthread_should_stop()) {
        spin_lock_bh(&qedi->ll2_lock);
        list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
                                 list) {
            list_del(&work->list);
            qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
            kfree_skb(work->skb);
            kfree(work);
        }
        set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_bh(&qedi->ll2_lock);
        schedule();
    }

    __set_current_state(TASK_RUNNING);
    return 0;
}
static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
{
    u8 num_sq_pages;
    u32 log_page_size;
    int rval = 0;

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
              MIN_NUM_CPUS_MSIX(qedi));

    num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;

    qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);

    memset(&qedi->pf_params.iscsi_pf_params, 0,
           sizeof(qedi->pf_params.iscsi_pf_params));

    qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
            qedi->num_queues * sizeof(struct qedi_glbl_q_params),
            &qedi->hw_p_cpuq);
    if (!qedi->p_cpuq) {
        QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
        rval = -1;
        goto err_alloc_mem;
    }

    rval = qedi_alloc_global_queues(qedi);
    if (rval) {
        QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
        rval = -1;
        goto err_alloc_mem;
    }

    qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
    qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
    qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
    qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
    qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
    qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
    qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
    qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;

    for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
        if ((1 << log_page_size) == PAGE_SIZE)
            break;
    }
    qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;

    qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
                                                    (u64)qedi->hw_p_cpuq;

    /* RQ BDQ initializations.
     * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
     * rqe_log_size: 8 for 256B RQE
     */
    qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
    /* BDQ address and size */
    qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
                                                    qedi->bdq_pbl_list_dma;
    qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
                                            qedi->bdq_pbl_list_num_entries;
    qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;

    /* cq_num_entries: num_tasks + rq_num_entries */
    qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;

    qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
    qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
    qedi->pf_params.iscsi_pf_params.ooo_enable = 1;

err_alloc_mem:
    return rval;
}
/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
{
    size_t size = 0;

    if (qedi->p_cpuq) {
        size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
        pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
                            qedi->hw_p_cpuq);
    }

    qedi_free_global_queues(qedi);

    kfree(qedi->global_queues);
}

static void qedi_link_update(void *dev, struct qed_link_output *link)
{
    struct qedi_ctx *qedi = (struct qedi_ctx *)dev;

    if (link->link_up) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
        atomic_set(&qedi->link_state, QEDI_LINK_UP);
    } else {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Link Down event.\n");
        atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
    }
}

static struct qed_iscsi_cb_ops qedi_cb_ops = {
    {
        .link_update = qedi_link_update,
    },
};
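
/*
 * Queue a completion entry on the per-CPU I/O work list. Solicited CQEs
 * reuse the work item embedded in the command; unsolicited, dummy and
 * task-cleanup CQEs get a freshly allocated work item.
 */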
static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
                          u16 que_idx, struct qedi_percpu_s *p)
{
    struct qedi_work *qedi_work;
    struct qedi_conn *q_conn;
    struct iscsi_conn *conn;
    struct qedi_cmd *qedi_cmd;
    u32 iscsi_cid;
    int rc = 0;

    iscsi_cid = cqe->cqe_common.conn_id;
    q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
    if (!q_conn) {
        QEDI_WARN(&qedi->dbg_ctx,
                  "Session no longer exists for cid=0x%x!!\n",
                  iscsi_cid);
        return -1;
    }
    conn = q_conn->cls_conn->dd_data;

    switch (cqe->cqe_common.cqe_type) {
    case ISCSI_CQE_TYPE_SOLICITED:
    case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
        qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
        if (!qedi_cmd) {
            rc = -1;
            break;
        }
        INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
        qedi_cmd->cqe_work.qedi = qedi;
        memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
        qedi_cmd->cqe_work.que_idx = que_idx;
        qedi_cmd->cqe_work.is_solicited = true;
        list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
        break;
    case ISCSI_CQE_TYPE_UNSOLICITED:
    case ISCSI_CQE_TYPE_DUMMY:
    case ISCSI_CQE_TYPE_TASK_CLEANUP:
        qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
        if (!qedi_work) {
            rc = -1;
            break;
        }
        INIT_LIST_HEAD(&qedi_work->list);
        qedi_work->qedi = qedi;
        memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
        qedi_work->que_idx = que_idx;
        qedi_work->is_solicited = false;
        list_add_tail(&qedi_work->list, &p->work_list);
        break;
    default:
        rc = -1;
        QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
    }
    return rc;
}
static bool qedi_process_completions(struct qedi_fastpath *fp)
{
    struct qedi_ctx *qedi = fp->qedi;
    struct qed_sb_info *sb_info = fp->sb_info;
    struct status_block *sb = sb_info->sb_virt;
    struct qedi_percpu_s *p = NULL;
    struct global_queue *que;
    u16 prod_idx;
    unsigned long flags;
    union iscsi_cqe *cqe;
    int cpu;
    int ret;

    /* Get the current firmware producer index */
    prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

    if (prod_idx >= QEDI_CQ_SIZE)
        prod_idx = prod_idx % QEDI_CQ_SIZE;

    que = qedi->global_queues[fp->sb_id];
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
              "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
              que, prod_idx, que->cq_cons_idx, fp->sb_id);

    qedi->intr_cpu = fp->sb_id;
    cpu = smp_processor_id();
    p = &per_cpu(qedi_percpu, cpu);

    if (unlikely(!p->iothread))
        WARN_ON(1);

    spin_lock_irqsave(&p->p_work_lock, flags);
    while (que->cq_cons_idx != prod_idx) {
        cqe = &que->cq[que->cq_cons_idx];

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
                  "cqe=%p prod_idx=%d cons_idx=%d.\n",
                  cqe, prod_idx, que->cq_cons_idx);

        ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
        if (ret)
            break;

        que->cq_cons_idx++;
        if (que->cq_cons_idx == QEDI_CQ_SIZE)
            que->cq_cons_idx = 0;
    }
    wake_up_process(p->iothread);
    spin_unlock_irqrestore(&p->p_work_lock, flags);

    return true;
}
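
/*
 * Compare the firmware producer index from the status block against our
 * consumer index to decide whether the completion queue has more work.
 */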
static bool qedi_fp_has_work(struct qedi_fastpath *fp)
{
    struct qedi_ctx *qedi = fp->qedi;
    struct global_queue *que;
    struct qed_sb_info *sb_info = fp->sb_info;
    struct status_block *sb = sb_info->sb_virt;
    u16 prod_idx;

    barrier();

    /* Get the current firmware producer index */
    prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

    /* Get the pointer to the global CQ this completion is on */
    que = qedi->global_queues[fp->sb_id];

    /* prod idx wrap around uint16 */
    if (prod_idx >= QEDI_CQ_SIZE)
        prod_idx = prod_idx % QEDI_CQ_SIZE;

    return (que->cq_cons_idx != prod_idx);
}

/* MSI-X fastpath handler code */
static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
{
    struct qedi_fastpath *fp = dev_id;
    struct qedi_ctx *qedi = fp->qedi;
    bool wake_io_thread = true;

    qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

process_again:
    wake_io_thread = qedi_process_completions(fp);
    if (wake_io_thread) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "process already running\n");
    }

    if (qedi_fp_has_work(fp) == 0)
        qed_sb_update_sb_idx(fp->sb_info);

    /* Check for more work */
    rmb();

    if (qedi_fp_has_work(fp) == 0)
        qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
    else
        goto process_again;

    return IRQ_HANDLED;
}
/* simd handler for MSI/INTa */
static void qedi_simd_int_handler(void *cookie)
{
    /* Cookie is qedi_ctx struct */
    struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;

    QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
}

#define QEDI_SIMD_HANDLER_NUM		0
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
    int i;

    if (qedi->int_info.msix_cnt) {
        for (i = 0; i < qedi->int_info.used_cnt; i++) {
            synchronize_irq(qedi->int_info.msix[i].vector);
            irq_set_affinity_hint(qedi->int_info.msix[i].vector,
                                  NULL);
            free_irq(qedi->int_info.msix[i].vector,
                     &qedi->fp_array[i]);
        }
    } else {
        qedi_ops->common->simd_handler_clean(qedi->cdev,
                                             QEDI_SIMD_HANDLER_NUM);
    }

    qedi->int_info.used_cnt = 0;
    qedi_ops->common->set_fp_int(qedi->cdev, 0);
}
static int qedi_request_msix_irq(struct qedi_ctx *qedi)
{
    int i, rc, cpu;

    cpu = cpumask_first(cpu_online_mask);
    for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
        rc = request_irq(qedi->int_info.msix[i].vector,
                         qedi_msix_handler, 0, "qedi",
                         &qedi->fp_array[i]);

        if (rc) {
            QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
            qedi_sync_free_irqs(qedi);
            return rc;
        }
        qedi->int_info.used_cnt++;
        rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
                                   get_cpu_mask(cpu));
        cpu = cpumask_next(cpu, cpu_online_mask);
    }

    return 0;
}

static int qedi_setup_int(struct qedi_ctx *qedi)
{
    int rc = 0;

    rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
    rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
    if (rc)
        goto exit_setup_int;

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
              "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
              qedi->int_info.msix_cnt, num_online_cpus());

    if (qedi->int_info.msix_cnt) {
        rc = qedi_request_msix_irq(qedi);
        goto exit_setup_int;
    } else {
        qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
                                              QEDI_SIMD_HANDLER_NUM,
                                              qedi_simd_int_handler);
        qedi->int_info.used_cnt = 1;
    }

exit_setup_int:
    return rc;
}
static void qedi_free_bdq(struct qedi_ctx *qedi)
{
    int i;

    if (qedi->bdq_pbl_list)
        dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
                          qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);

    if (qedi->bdq_pbl)
        dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
                          qedi->bdq_pbl, qedi->bdq_pbl_dma);

    for (i = 0; i < QEDI_BDQ_NUM; i++) {
        if (qedi->bdq[i].buf_addr) {
            dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
                              qedi->bdq[i].buf_addr,
                              qedi->bdq[i].buf_dma);
        }
    }
}

static void qedi_free_global_queues(struct qedi_ctx *qedi)
{
    int i;
    struct global_queue **gl = qedi->global_queues;

    for (i = 0; i < qedi->num_queues; i++) {
        if (!gl[i])
            continue;

        if (gl[i]->cq)
            dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
                              gl[i]->cq, gl[i]->cq_dma);
        if (gl[i]->cq_pbl)
            dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
                              gl[i]->cq_pbl, gl[i]->cq_pbl_dma);

        kfree(gl[i]);
    }

    qedi_free_bdq(qedi);
}
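
/*
 * The BDQ (buffer descriptor queue) supplies the firmware with receive
 * buffers for unsolicited traffic. Allocate the buffers, a PBL describing
 * them, and a page listing the PBL pages themselves.
 */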
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
{
    int i;
    struct scsi_bd *pbl;
    u64 *list;
    dma_addr_t page;

    /* Alloc dma memory for BDQ buffers */
    for (i = 0; i < QEDI_BDQ_NUM; i++) {
        qedi->bdq[i].buf_addr =
                dma_alloc_coherent(&qedi->pdev->dev,
                                   QEDI_BDQ_BUF_SIZE,
                                   &qedi->bdq[i].buf_dma,
                                   GFP_KERNEL);
        if (!qedi->bdq[i].buf_addr) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Could not allocate BDQ buffer %d.\n", i);
            return -ENOMEM;
        }
    }

    /* Alloc dma memory for BDQ page buffer list */
    qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
    qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
    qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
              qedi->rq_num_entries);

    qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
                                       qedi->bdq_pbl_mem_size,
                                       &qedi->bdq_pbl_dma, GFP_KERNEL);
    if (!qedi->bdq_pbl) {
        QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
        return -ENOMEM;
    }

    /*
     * Populate BDQ PBL with physical and virtual address of individual
     * BDQ buffers
     */
    pbl = (struct scsi_bd *)qedi->bdq_pbl;
    for (i = 0; i < QEDI_BDQ_NUM; i++) {
        pbl->address.hi =
                cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
        pbl->address.lo =
                cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
                  pbl, pbl->address.hi, pbl->address.lo, i);
        pbl->opaque.hi = 0;
        pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
        pbl++;
    }

    /* Allocate list of PBL pages */
    qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
                                            PAGE_SIZE,
                                            &qedi->bdq_pbl_list_dma,
                                            GFP_KERNEL);
    if (!qedi->bdq_pbl_list) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "Could not allocate list of PBL pages.\n");
        return -ENOMEM;
    }
    memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);

    /*
     * Now populate PBL list with pages that contain pointers to the
     * individual buffers.
     */
    qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
    list = (u64 *)qedi->bdq_pbl_list;
    page = qedi->bdq_pbl_list_dma;
    for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
        *list = qedi->bdq_pbl_dma;
        list++;
    }

    return 0;
}
static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
    u32 *list;
    int i;
    int status = 0, rc;
    u32 *pbl;
    dma_addr_t page;
    int num_pages;

    /*
     * Number of global queues (CQ / RQ). This should
     * be <= number of available MSIX vectors for the PF
     */
    if (!qedi->num_queues) {
        QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
        return 1;
    }

    /* Make sure we allocated the PBL that will contain the physical
     * addresses of our queues
     */
    if (!qedi->p_cpuq) {
        status = 1;
        goto mem_alloc_failure;
    }

    qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
                                   qedi->num_queues), GFP_KERNEL);
    if (!qedi->global_queues) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "Unable to allocate global queues array ptr memory\n");
        return -ENOMEM;
    }
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
              "qedi->global_queues=%p.\n", qedi->global_queues);

    /* Allocate DMA coherent buffers for BDQ */
    rc = qedi_alloc_bdq(qedi);
    if (rc)
        goto mem_alloc_failure;

    /* Allocate a CQ and an associated PBL for each MSI-X
     * vector.
     */
    for (i = 0; i < qedi->num_queues; i++) {
        qedi->global_queues[i] =
                kzalloc(sizeof(*qedi->global_queues[0]),
                        GFP_KERNEL);
        if (!qedi->global_queues[i]) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Unable to allocation global queue %d.\n", i);
            goto mem_alloc_failure;
        }

        qedi->global_queues[i]->cq_mem_size =
            (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
        qedi->global_queues[i]->cq_mem_size =
            (qedi->global_queues[i]->cq_mem_size +
            (QEDI_PAGE_SIZE - 1));

        qedi->global_queues[i]->cq_pbl_size =
            (qedi->global_queues[i]->cq_mem_size /
            QEDI_PAGE_SIZE) * sizeof(void *);
        qedi->global_queues[i]->cq_pbl_size =
            (qedi->global_queues[i]->cq_pbl_size +
            (QEDI_PAGE_SIZE - 1));

        qedi->global_queues[i]->cq =
            dma_alloc_coherent(&qedi->pdev->dev,
                               qedi->global_queues[i]->cq_mem_size,
                               &qedi->global_queues[i]->cq_dma,
                               GFP_KERNEL);

        if (!qedi->global_queues[i]->cq) {
            QEDI_WARN(&qedi->dbg_ctx,
                      "Could not allocate cq.\n");
            status = -ENOMEM;
            goto mem_alloc_failure;
        }
        memset(qedi->global_queues[i]->cq, 0,
               qedi->global_queues[i]->cq_mem_size);

        qedi->global_queues[i]->cq_pbl =
            dma_alloc_coherent(&qedi->pdev->dev,
                               qedi->global_queues[i]->cq_pbl_size,
                               &qedi->global_queues[i]->cq_pbl_dma,
                               GFP_KERNEL);

        if (!qedi->global_queues[i]->cq_pbl) {
            QEDI_WARN(&qedi->dbg_ctx,
                      "Could not allocate cq PBL.\n");
            status = -ENOMEM;
            goto mem_alloc_failure;
        }
        memset(qedi->global_queues[i]->cq_pbl, 0,
               qedi->global_queues[i]->cq_pbl_size);

        /* Create PBL */
        num_pages = qedi->global_queues[i]->cq_mem_size /
            QEDI_PAGE_SIZE;
        page = qedi->global_queues[i]->cq_dma;
        pbl = (u32 *)qedi->global_queues[i]->cq_pbl;

        while (num_pages--) {
            *pbl = (u32)page;
            pbl++;
            *pbl = (u32)((u64)page >> 32);
            pbl++;
            page += QEDI_PAGE_SIZE;
        }
    }

    list = (u32 *)qedi->p_cpuq;

    /*
     * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
     * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
     * to the physical address which contains an array of pointers to the
     * physical addresses of the specific queue pages.
     */
    for (i = 0; i < qedi->num_queues; i++) {
        *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
        list++;
        *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
        list++;

        *list = (u32)0;
        list++;
        *list = (u32)((u64)0 >> 32);
        list++;
    }

    return 0;

mem_alloc_failure:
    qedi_free_global_queues(qedi);
    return status;
}
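
/*
 * Allocate an endpoint's send queue along with the page buffer list (PBL)
 * of 32-bit address halves that the firmware walks to find the SQ pages.
 */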
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
    int rval = 0;
    u32 *pbl;
    dma_addr_t page;
    int num_pages;

    if (!ep)
        return -EIO;

    /* Calculate appropriate queue and PBL sizes */
    ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
    ep->sq_mem_size += QEDI_PAGE_SIZE - 1;

    ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
    ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;

    ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
                                &ep->sq_dma, GFP_KERNEL);
    if (!ep->sq) {
        QEDI_WARN(&qedi->dbg_ctx,
                  "Could not allocate send queue.\n");
        rval = -ENOMEM;
        goto out;
    }
    memset(ep->sq, 0, ep->sq_mem_size);

    ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
                                    &ep->sq_pbl_dma, GFP_KERNEL);
    if (!ep->sq_pbl) {
        QEDI_WARN(&qedi->dbg_ctx,
                  "Could not allocate send queue PBL.\n");
        rval = -ENOMEM;
        goto out_free_sq;
    }
    memset(ep->sq_pbl, 0, ep->sq_pbl_size);

    /* Create PBL */
    num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
    page = ep->sq_dma;
    pbl = (u32 *)ep->sq_pbl;

    while (num_pages--) {
        *pbl = (u32)page;
        pbl++;
        *pbl = (u32)((u64)page >> 32);
        pbl++;
        page += QEDI_PAGE_SIZE;
    }

    return rval;

out_free_sq:
    dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
                      ep->sq_dma);
out:
    return rval;
}

void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
    if (ep->sq_pbl)
        dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
                          ep->sq_pbl_dma);
    if (ep->sq)
        dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
                          ep->sq_dma);
}
int qedi_get_task_idx(struct qedi_ctx *qedi)
{
    s16 tmp_idx;

again:
    tmp_idx = find_first_zero_bit(qedi->task_idx_map,
                                  MAX_ISCSI_TASK_ENTRIES);

    if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
        QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
        tmp_idx = -1;
        goto err_idx;
    }

    if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
        goto again;

err_idx:
    return tmp_idx;
}

void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
{
    if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "FW task context, already cleared, tid=0x%x\n", idx);
        WARN_ON(1);
    }
}

void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
                         struct qedi_cmd *cmd)
{
    qedi->itt_map[tid].itt = proto_itt;
    qedi->itt_map[tid].p_cmd = cmd;

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
              "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
              qedi->itt_map[tid].itt);
}

void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
{
    u16 i;

    for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
        if (qedi->itt_map[i].itt == itt) {
            *tid = i;
            QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                      "Ref itt=0x%x, found at tid=0x%x\n",
                      itt, *tid);
            return;
        }
    }

    WARN_ON(1);
}

void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
{
    *proto_itt = qedi->itt_map[tid].itt;
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
              "Get itt map tid [0x%x with proto itt[0x%x]",
              tid, *proto_itt);
}
struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
{
    struct qedi_cmd *cmd = NULL;

    if (tid > MAX_ISCSI_TASK_ENTRIES)
        return NULL;

    cmd = qedi->itt_map[tid].p_cmd;
    if (cmd->task_id != tid)
        return NULL;

    qedi->itt_map[tid].p_cmd = NULL;

    return cmd;
}

static int qedi_alloc_itt(struct qedi_ctx *qedi)
{
    qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
                            sizeof(struct qedi_itt_map), GFP_KERNEL);
    if (!qedi->itt_map) {
        QEDI_ERR(&qedi->dbg_ctx,
                 "Unable to allocate itt map array memory\n");
        return -ENOMEM;
    }
    return 0;
}

static void qedi_free_itt(struct qedi_ctx *qedi)
{
    kfree(qedi->itt_map);
}

static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
    .rx_cb = qedi_ll2_rx,
    .tx_cb = NULL,
};
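
/*
 * Per-CPU I/O thread: drains the per-CPU work list populated from the
 * fastpath interrupt and processes the CQEs in process context.
 */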
static int qedi_percpu_io_thread(void *arg)
{
    struct qedi_percpu_s *p = arg;
    struct qedi_work *work, *tmp;
    unsigned long flags;
    LIST_HEAD(work_list);

    set_user_nice(current, -20);

    while (!kthread_should_stop()) {
        spin_lock_irqsave(&p->p_work_lock, flags);
        while (!list_empty(&p->work_list)) {
            list_splice_init(&p->work_list, &work_list);
            spin_unlock_irqrestore(&p->p_work_lock, flags);

            list_for_each_entry_safe(work, tmp, &work_list, list) {
                list_del_init(&work->list);
                qedi_fp_process_cqes(work);
                if (!work->is_solicited)
                    kfree(work);
            }

            spin_lock_irqsave(&p->p_work_lock, flags);
        }
        set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_irqrestore(&p->p_work_lock, flags);
        schedule();
    }
    __set_current_state(TASK_RUNNING);

    return 0;
}

static int qedi_cpu_online(unsigned int cpu)
{
    struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
    struct task_struct *thread;

    thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
                                    cpu_to_node(cpu),
                                    "qedi_thread/%d", cpu);
    if (IS_ERR(thread))
        return PTR_ERR(thread);

    kthread_bind(thread, cpu);
    p->iothread = thread;
    wake_up_process(thread);
    return 0;
}

static int qedi_cpu_offline(unsigned int cpu)
{
    struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
    struct qedi_work *work, *tmp;
    struct task_struct *thread;

    spin_lock_bh(&p->p_work_lock);
    thread = p->iothread;
    p->iothread = NULL;

    list_for_each_entry_safe(work, tmp, &p->work_list, list) {
        list_del_init(&work->list);
        qedi_fp_process_cqes(work);
        if (!work->is_solicited)
            kfree(work);
    }

    spin_unlock_bh(&p->p_work_lock);
    if (thread)
        kthread_stop(thread);
    return 0;
}
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
    struct qed_ll2_params params;

    qedi_recover_all_conns(qedi);

    qedi_ops->ll2->stop(qedi->cdev);
    qedi_ll2_free_skbs(qedi);

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
              qedi->ll2_mtu, mtu);
    memset(&params, 0, sizeof(params));
    qedi->ll2_mtu = mtu;
    params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
    params.drop_ttl0_packets = 0;
    params.rx_vlan_stripping = 1;
    ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
    qedi_ops->ll2->start(qedi->cdev, &params);
}
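
/*
 * Common teardown for the remove and recovery paths; "mode" decides
 * whether connection/host resources are released too (QEDI_MODE_NORMAL)
 * or only the hardware path is torn down.
 */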
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
    struct qedi_ctx *qedi = pci_get_drvdata(pdev);

    if (qedi->tmf_thread) {
        flush_workqueue(qedi->tmf_thread);
        destroy_workqueue(qedi->tmf_thread);
        qedi->tmf_thread = NULL;
    }

    if (qedi->offload_thread) {
        flush_workqueue(qedi->offload_thread);
        destroy_workqueue(qedi->offload_thread);
        qedi->offload_thread = NULL;
    }

#ifdef CONFIG_DEBUG_FS
    qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
    if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
        qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

    qedi_sync_free_irqs(qedi);

    if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
        qedi_ops->stop(qedi->cdev);
        qedi_ops->ll2->stop(qedi->cdev);
    }

    if (mode == QEDI_MODE_NORMAL)
        qedi_free_iscsi_pf_param(qedi);

    if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
        qedi_ops->common->slowpath_stop(qedi->cdev);
        qedi_ops->common->remove(qedi->cdev);
    }

    qedi_destroy_fp(qedi);

    if (mode == QEDI_MODE_NORMAL) {
        qedi_release_cid_que(qedi);
        qedi_cm_free_mem(qedi);
        qedi_free_uio(qedi->udev);
        qedi_free_itt(qedi);

        iscsi_host_remove(qedi->shost);
        iscsi_host_free(qedi->shost);

        if (qedi->ll2_recv_thread) {
            kthread_stop(qedi->ll2_recv_thread);
            qedi->ll2_recv_thread = NULL;
        }
        qedi_ll2_free_skbs(qedi);
    }
}
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
    struct qedi_ctx *qedi;
    struct qed_ll2_params params;
    u32 dp_module = 0;
    u8 dp_level = 0;
    bool is_vf = false;
    char host_buf[16];
    struct qed_link_params link_params;
    struct qed_slowpath_params sp_params;
    struct qed_probe_params qed_params;
    void *task_start, *task_end;
    int rc;
    u16 tmp;

    if (mode != QEDI_MODE_RECOVERY) {
        qedi = qedi_host_alloc(pdev);
        if (!qedi) {
            rc = -ENOMEM;
            goto exit_probe;
        }
    } else {
        qedi = pci_get_drvdata(pdev);
    }

    memset(&qed_params, 0, sizeof(qed_params));
    qed_params.protocol = QED_PROTOCOL_ISCSI;
    qed_params.dp_module = dp_module;
    qed_params.dp_level = dp_level;
    qed_params.is_vf = is_vf;
    qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
    if (!qedi->cdev) {
        rc = -ENODEV;
        QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
        goto free_host;
    }

    qedi->msix_count = MAX_NUM_MSIX_PF;
    atomic_set(&qedi->link_state, QEDI_LINK_DOWN);

    if (mode != QEDI_MODE_RECOVERY) {
        rc = qedi_set_iscsi_pf_param(qedi);
        if (rc) {
            rc = -ENOMEM;
            QEDI_ERR(&qedi->dbg_ctx,
                     "Set iSCSI pf param fail\n");
            goto free_host;
        }
    }

    qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

    rc = qedi_prepare_fp(qedi);
    if (rc) {
        QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
        goto free_pf_params;
    }

    /* Start the Slowpath-process */
    memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
    sp_params.int_mode = QED_INT_MODE_MSIX;
    sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
    sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
    sp_params.drv_rev = QEDI_DRIVER_REV_VER;
    sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
    strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
    rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
    if (rc) {
        QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
        goto stop_hw;
    }

    /* update_pf_params needs to be called before and after slowpath
     * start
     */
    qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

    rc = qedi_setup_int(qedi);
    if (rc)
        goto stop_iscsi_func;

    qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

    /* Learn information crucial for qedi to progress */
    rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
    if (rc)
        goto stop_iscsi_func;

    /* Record BDQ producer doorbell addresses */
    qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
    qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
              "BDQ primary_prod=%p secondary_prod=%p.\n",
              qedi->bdq_primary_prod,
              qedi->bdq_secondary_prod);

    /*
     * We need to write the number of BDs in the BDQ we've preallocated so
     * the f/w will do a prefetch and we'll get an unsolicited CQE when a
     * new packet arrives.
     */
    qedi->bdq_prod_idx = QEDI_BDQ_NUM;
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
              "Writing %d to primary and secondary BDQ doorbell registers.\n",
              qedi->bdq_prod_idx);
    writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
    tmp = readw(qedi->bdq_primary_prod);
    writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
    tmp = readw(qedi->bdq_secondary_prod);

    ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
              qedi->mac);

    sprintf(host_buf, "host_%d", qedi->shost->host_no);
    qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);

    qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);

    memset(&params, 0, sizeof(params));
    params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
    qedi->ll2_mtu = DEF_PATH_MTU;
    params.drop_ttl0_packets = 0;
    params.rx_vlan_stripping = 1;
    ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);

    if (mode != QEDI_MODE_RECOVERY) {
        /* set up rx path */
        INIT_LIST_HEAD(&qedi->ll2_skb_list);
        spin_lock_init(&qedi->ll2_lock);
        /* start qedi context */
        spin_lock_init(&qedi->hba_lock);
        spin_lock_init(&qedi->task_idx_lock);
    }
    qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
    qedi_ops->ll2->start(qedi->cdev, &params);

    if (mode != QEDI_MODE_RECOVERY) {
        qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
                                            (void *)qedi,
                                            "qedi_ll2_thread");
    }

    rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
                         qedi, qedi_iscsi_event_cb);
    if (rc) {
        rc = -ENODEV;
        QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
        goto stop_slowpath;
    }

    task_start = qedi_get_task_mem(&qedi->tasks, 0);
    task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
              "Task context start=%p, end=%p block_size=%u.\n",
              task_start, task_end, qedi->tasks.size);

    memset(&link_params, 0, sizeof(link_params));
    link_params.link_up = true;
    rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
    if (rc) {
        QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
        atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
    }

#ifdef CONFIG_DEBUG_FS
    qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
                       &qedi_dbg_fops);
#endif
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
              "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
              QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
              FW_REVISION_VERSION, FW_ENGINEERING_VERSION);

    if (mode == QEDI_MODE_NORMAL) {
        if (iscsi_host_add(qedi->shost, &pdev->dev)) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Could not add iscsi host\n");
            rc = -ENOMEM;
            goto remove_host;
        }

        /* Allocate uio buffers */
        rc = qedi_alloc_uio_rings(qedi);
        if (rc) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "UIO alloc ring failed err=%d\n", rc);
            goto remove_host;
        }

        rc = qedi_init_uio(qedi);
        if (rc) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "UIO init failed, err=%d\n", rc);
            goto free_uio;
        }

        /* host the array on iscsi_conn */
        rc = qedi_setup_cid_que(qedi);
        if (rc) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Could not setup cid que\n");
            goto free_uio;
        }

        rc = qedi_cm_alloc_mem(qedi);
        if (rc) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Could not alloc cm memory\n");
            goto free_cid_que;
        }

        rc = qedi_alloc_itt(qedi);
        if (rc) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Could not alloc itt memory\n");
            goto free_cid_que;
        }

        sprintf(host_buf, "host_%d", qedi->shost->host_no);
        qedi->tmf_thread = create_singlethread_workqueue(host_buf);
        if (!qedi->tmf_thread) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Unable to start tmf thread!\n");
            rc = -ENODEV;
            goto free_cid_que;
        }

        sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
        qedi->offload_thread = create_workqueue(host_buf);
        if (!qedi->offload_thread) {
            QEDI_ERR(&qedi->dbg_ctx,
                     "Unable to start offload thread!\n");
            rc = -ENODEV;
            goto free_cid_que;
        }

        /* F/w needs 1st task context memory entry for performance */
        set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
        atomic_set(&qedi->num_offloads, 0);
    }

    return 0;

free_cid_que:
    qedi_release_cid_que(qedi);
free_uio:
    qedi_free_uio(qedi->udev);
remove_host:
#ifdef CONFIG_DEBUG_FS
    qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
    iscsi_host_remove(qedi->shost);
stop_iscsi_func:
    qedi_ops->stop(qedi->cdev);
stop_slowpath:
    qedi_ops->common->slowpath_stop(qedi->cdev);
stop_hw:
    qedi_ops->common->remove(qedi->cdev);
free_pf_params:
    qedi_free_iscsi_pf_param(qedi);
free_host:
    iscsi_host_free(qedi->shost);
exit_probe:
    return rc;
}
static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    return __qedi_probe(pdev, QEDI_MODE_NORMAL);
}

static void qedi_remove(struct pci_dev *pdev)
{
    __qedi_remove(pdev, QEDI_MODE_NORMAL);
}

static struct pci_device_id qedi_pci_tbl[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
    { 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);

static enum cpuhp_state qedi_cpuhp_state;

static struct pci_driver qedi_pci_driver = {
    .name = QEDI_MODULE_NAME,
    .id_table = qedi_pci_tbl,
    .probe = qedi_probe,
    .remove = qedi_remove,
};
static int __init qedi_init(void)
{
    struct qedi_percpu_s *p;
    int cpu, rc = 0;

    qedi_ops = qed_get_iscsi_ops();
    if (!qedi_ops) {
        QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
        return -EINVAL;
    }

#ifdef CONFIG_DEBUG_FS
    qedi_dbg_init("qedi");
#endif

    qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
    if (!qedi_scsi_transport) {
        QEDI_ERR(NULL, "Could not register qedi transport");
        rc = -ENOMEM;
        goto exit_qedi_init_1;
    }

    for_each_possible_cpu(cpu) {
        p = &per_cpu(qedi_percpu, cpu);
        INIT_LIST_HEAD(&p->work_list);
        spin_lock_init(&p->p_work_lock);
        p->iothread = NULL;
    }

    rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
                           qedi_cpu_online, qedi_cpu_offline);
    if (rc < 0)
        goto exit_qedi_init_2;
    qedi_cpuhp_state = rc;

    rc = pci_register_driver(&qedi_pci_driver);
    if (rc) {
        QEDI_ERR(NULL, "Failed to register driver\n");
        goto exit_qedi_hp;
    }

    return 0;

exit_qedi_hp:
    cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
    iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
#ifdef CONFIG_DEBUG_FS
    qedi_dbg_exit();
#endif
    qed_put_iscsi_ops();
    return rc;
}

static void __exit qedi_cleanup(void)
{
    pci_unregister_driver(&qedi_pci_driver);
    cpuhp_remove_state(qedi_cpuhp_state);
    iscsi_unregister_transport(&qedi_iscsi_transport);

#ifdef CONFIG_DEBUG_FS
    qedi_dbg_exit();
#endif
    qed_put_iscsi_ops();
}

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);