/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		 "Maximum Size (In Kilobytes) of physically contiguous "
		 "memory that can be allocated. Range is 16 - 128");
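/*
 * Module parameters: be_iopoll_budget bounds the work done per blk-iopoll
 * pass, enable_msix requests MSI-X interrupts when nonzero, and
 * be_max_phys_size caps each physically contiguous allocation in kilobytes.
 */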
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}
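/*
 * SCSI host template: generic libiscsi entry points paired with the
 * BE2-specific queue depth, SG-list and transfer-size limits.
 */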
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.eh_abort_handler = iscsi_eh_abort,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
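/*
 * Allocate a Scsi_Host with a beiscsi_hba embedded as its private data,
 * initialize the host limits and register it with the iSCSI transport.
 */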
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;

	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;
	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
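/* Tear down whatever BAR mappings beiscsi_map_pci_bars() set up. */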
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
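/*
 * Map the controller BARs: the CSR registers (BAR 2), the doorbell area
 * (first 128KB of BAR 4) and the PCI config space window (BAR 1).
 */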
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	addr = ioremap_nocache(pci_resource_start(pcidev, 1),
			       pci_resource_len(pcidev, 1));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
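/*
 * Enable the PCI function and fall back from a 64-bit to a 32-bit
 * consistent DMA mask if the larger mask is rejected.
 */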
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
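/*
 * Map the BARs and allocate the MCC mailbox from coherent DMA memory,
 * keeping a 16-byte aligned view of it for use by the firmware.
 */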
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	return status;
}
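/*
 * Fill in the per-controller resource limits; the EQ and CQ depths are
 * derived from the command, logout, TMF and async PDU budgets, and the EQ
 * depth is floored at 1024 entries.
 */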
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = BE2_IO_DEPTH;
	phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
	phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
	phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
								512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
								512) + 1) * 512;
	SE_DEBUG(DBG_LVL_8,
		 "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d "
		 "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d\n",
		 phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
		 BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
	phba->params.wrbs_per_cxn = 256;
}
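/*
 * Ring the event-queue doorbell: the ring id occupies the low bits and the
 * rearm/clear/event flags plus the number of popped entries are OR-ed in
 * at their shifts before the 32-bit write to the doorbell BAR.
 */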
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
/**
 * be_isr - The isr routine of the driver.
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned long flags, index;
	unsigned int num_eq_processed;
	struct be_ctrl_info *ctrl;
	int isr;

	phba = dev_id;
	if (!enable_msix) {
		ctrl = &phba->ctrl;
		isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
			       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
		if (!isr)
			return IRQ_NONE;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	eq = &phwi_context->be_eq.q;
	cq = &phwi_context->be_cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&phba->iopoll))
				blk_iopoll_sched(&phba->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
			SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
		}
		if (num_eq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
					       resource_id) / 32] &
			      EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	int ret;

	ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
			     "Failed to register irq\n");
		return ret;
	}
	return 0;
}
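/*
 * Ring the completion-queue doorbell, crediting back num_processed entries
 * and optionally re-arming the CQ.
 */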
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
/*
 * Unsolicited PDUs handled on this path include:
 * a. unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC messages
 * c. Reject PDUs
 * d. Login responses
 * These headers arrive unprocessed by the EP firmware and the iSCSI layer
 * processes them here.
 */
391 beiscsi_process_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
392 struct beiscsi_hba
*phba
,
394 struct pdu_base
*ppdu
,
395 unsigned long pdu_len
,
396 void *pbuffer
, unsigned long buf_len
)
398 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
399 struct iscsi_session
*session
= conn
->session
;
401 switch (ppdu
->dw
[offsetof(struct amap_pdu_base
, opcode
) / 32] &
402 PDUBASE_OPCODE_MASK
) {
403 case ISCSI_OP_NOOP_IN
:
407 case ISCSI_OP_ASYNC_EVENT
:
409 case ISCSI_OP_REJECT
:
411 WARN_ON(!(buf_len
== 48));
412 SE_DEBUG(DBG_LVL_1
, "In ISCSI_OP_REJECT\n");
414 case ISCSI_OP_LOGIN_RSP
:
417 shost_printk(KERN_WARNING
, phba
->shost
,
418 "Unrecognized opcode 0x%x in async msg \n",
420 dw
[offsetof(struct amap_pdu_base
, opcode
) / 32]
421 & PDUBASE_OPCODE_MASK
));
425 spin_lock_bh(&session
->lock
);
426 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)ppdu
, pbuffer
, buf_len
);
427 spin_unlock_bh(&session
->lock
);
431 static struct sgl_handle
*alloc_io_sgl_handle(struct beiscsi_hba
*phba
)
433 struct sgl_handle
*psgl_handle
;
435 if (phba
->io_sgl_hndl_avbl
) {
437 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
438 phba
->io_sgl_alloc_index
);
439 psgl_handle
= phba
->io_sgl_hndl_base
[phba
->
441 phba
->io_sgl_hndl_base
[phba
->io_sgl_alloc_index
] = NULL
;
442 phba
->io_sgl_hndl_avbl
--;
443 if (phba
->io_sgl_alloc_index
== (phba
->params
.ios_per_ctrl
- 1))
444 phba
->io_sgl_alloc_index
= 0;
446 phba
->io_sgl_alloc_index
++;
453 free_io_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
457 if (phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]) {
459 * this can happen if clean_task is called on a task that
460 * failed in xmit_task or alloc_pdu.
463 "Double Free in IO SGL io_sgl_free_index=%d,"
464 "value there=%p \n", phba
->io_sgl_free_index
,
465 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
]);
468 phba
->io_sgl_hndl_base
[phba
->io_sgl_free_index
] = psgl_handle
;
469 phba
->io_sgl_hndl_avbl
++;
470 if (phba
->io_sgl_free_index
== (phba
->params
.ios_per_ctrl
- 1))
471 phba
->io_sgl_free_index
= 0;
473 phba
->io_sgl_free_index
++;
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @index: index allocation and wrb index
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    int index)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	pwrb_handle = pwrb_context->pwrb_handle_base[index];
	pwrb_handle->wrb_index = index;
	pwrb_handle->nxt_wrb_index = index;
	return pwrb_handle;
}
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
512 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x"
513 "wrb_handles_available=%d \n",
514 pwrb_handle
, pwrb_context
->free_index
,
515 pwrb_context
->free_index
, pwrb_context
->wrb_handles_available
);
518 static struct sgl_handle
*alloc_mgmt_sgl_handle(struct beiscsi_hba
*phba
)
520 struct sgl_handle
*psgl_handle
;
522 if (phba
->eh_sgl_hndl_avbl
) {
523 psgl_handle
= phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
];
524 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_alloc_index
] = NULL
;
525 SE_DEBUG(DBG_LVL_8
, "mgmt_sgl_alloc_index=%d=0x%x \n",
526 phba
->eh_sgl_alloc_index
, phba
->eh_sgl_alloc_index
);
527 phba
->eh_sgl_hndl_avbl
--;
528 if (phba
->eh_sgl_alloc_index
==
529 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
-
531 phba
->eh_sgl_alloc_index
= 0;
533 phba
->eh_sgl_alloc_index
++;
540 free_mgmt_sgl_handle(struct beiscsi_hba
*phba
, struct sgl_handle
*psgl_handle
)
543 if (phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
]) {
545 * this can happen if clean_task is called on a task that
546 * failed in xmit_task or alloc_pdu.
549 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
550 phba
->eh_sgl_free_index
);
553 phba
->eh_sgl_hndl_base
[phba
->eh_sgl_free_index
] = psgl_handle
;
554 phba
->eh_sgl_hndl_avbl
++;
555 if (phba
->eh_sgl_free_index
==
556 (phba
->params
.icds_per_ctrl
- phba
->params
.ios_per_ctrl
- 1))
557 phba
->eh_sgl_free_index
= 0;
559 phba
->eh_sgl_free_index
++;
563 be_complete_io(struct beiscsi_conn
*beiscsi_conn
,
564 struct iscsi_task
*task
, struct sol_cqe
*psol
)
566 struct beiscsi_io_task
*io_task
= task
->dd_data
;
567 struct be_status_bhs
*sts_bhs
=
568 (struct be_status_bhs
*)io_task
->cmd_bhs
;
569 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
570 unsigned int sense_len
;
571 unsigned char *sense
;
572 u32 resid
= 0, exp_cmdsn
, max_cmdsn
;
573 u8 rsp
, status
, flags
;
575 exp_cmdsn
= be32_to_cpu(psol
->
576 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
577 & SOL_EXP_CMD_SN_MASK
);
578 max_cmdsn
= be32_to_cpu((psol
->
579 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
580 & SOL_EXP_CMD_SN_MASK
) +
581 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
582 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
583 rsp
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) / 32]
584 & SOL_RESP_MASK
) >> 16);
585 status
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_sts
) / 32]
586 & SOL_STS_MASK
) >> 8);
587 flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
588 & SOL_FLAGS_MASK
) >> 24) | 0x80;
590 task
->sc
->result
= (DID_OK
<< 16) | status
;
591 if (rsp
!= ISCSI_STATUS_CMD_COMPLETED
) {
592 task
->sc
->result
= DID_ERROR
<< 16;
596 /* bidi not initially supported */
597 if (flags
& (ISCSI_FLAG_CMD_UNDERFLOW
| ISCSI_FLAG_CMD_OVERFLOW
)) {
598 resid
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) /
599 32] & SOL_RES_CNT_MASK
);
601 if (!status
&& (flags
& ISCSI_FLAG_CMD_OVERFLOW
))
602 task
->sc
->result
= DID_ERROR
<< 16;
604 if (flags
& ISCSI_FLAG_CMD_UNDERFLOW
) {
605 scsi_set_resid(task
->sc
, resid
);
606 if (!status
&& (scsi_bufflen(task
->sc
) - resid
<
607 task
->sc
->underflow
))
608 task
->sc
->result
= DID_ERROR
<< 16;
612 if (status
== SAM_STAT_CHECK_CONDITION
) {
613 sense
= sts_bhs
->sense_info
+ sizeof(unsigned short);
615 cpu_to_be16((unsigned short)(sts_bhs
->sense_info
[0]));
616 memcpy(task
->sc
->sense_buffer
, sense
,
617 min_t(u16
, sense_len
, SCSI_SENSE_BUFFERSIZE
));
619 if (io_task
->cmd_bhs
->iscsi_hdr
.flags
& ISCSI_FLAG_CMD_READ
) {
620 if (psol
->dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
622 conn
->rxdata_octets
+= (psol
->
623 dw
[offsetof(struct amap_sol_cqe
, i_res_cnt
) / 32]
627 scsi_dma_unmap(io_task
->scsi_cmnd
);
628 iscsi_complete_scsi_task(task
, exp_cmdsn
, max_cmdsn
);
632 be_complete_logout(struct beiscsi_conn
*beiscsi_conn
,
633 struct iscsi_task
*task
, struct sol_cqe
*psol
)
635 struct iscsi_logout_rsp
*hdr
;
636 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
638 hdr
= (struct iscsi_logout_rsp
*)task
->hdr
;
641 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
642 & SOL_FLAGS_MASK
) >> 24) | 0x80;
643 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
644 32] & SOL_RESP_MASK
);
645 hdr
->exp_cmdsn
= cpu_to_be32(psol
->
646 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
647 & SOL_EXP_CMD_SN_MASK
);
648 hdr
->max_cmdsn
= be32_to_cpu((psol
->
649 dw
[offsetof(struct amap_sol_cqe
, i_exp_cmd_sn
) / 32]
650 & SOL_EXP_CMD_SN_MASK
) +
651 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
652 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
655 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
659 be_complete_tmf(struct beiscsi_conn
*beiscsi_conn
,
660 struct iscsi_task
*task
, struct sol_cqe
*psol
)
662 struct iscsi_tm_rsp
*hdr
;
663 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
665 hdr
= (struct iscsi_tm_rsp
*)task
->hdr
;
666 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
667 & SOL_FLAGS_MASK
) >> 24) | 0x80;
668 hdr
->response
= (psol
->dw
[offsetof(struct amap_sol_cqe
, i_resp
) /
669 32] & SOL_RESP_MASK
);
670 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
671 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
672 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
673 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
674 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
675 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
676 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
680 hwi_complete_drvr_msgs(struct beiscsi_conn
*beiscsi_conn
,
681 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
683 struct hwi_wrb_context
*pwrb_context
;
684 struct wrb_handle
*pwrb_handle
;
685 struct hwi_controller
*phwi_ctrlr
;
686 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
687 struct iscsi_session
*session
= conn
->session
;
689 phwi_ctrlr
= phba
->phwi_ctrlr
;
690 pwrb_context
= &phwi_ctrlr
->wrb_context
[((psol
->
691 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
692 SOL_CID_MASK
) >> 6)];
693 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
694 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
695 32] & SOL_WRB_INDEX_MASK
) >> 16)];
696 spin_lock_bh(&session
->lock
);
697 free_wrb_handle(phba
, pwrb_context
, pwrb_handle
);
698 spin_unlock_bh(&session
->lock
);
702 be_complete_nopin_resp(struct beiscsi_conn
*beiscsi_conn
,
703 struct iscsi_task
*task
, struct sol_cqe
*psol
)
705 struct iscsi_nopin
*hdr
;
706 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
708 hdr
= (struct iscsi_nopin
*)task
->hdr
;
709 hdr
->flags
= ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_flags
) / 32]
710 & SOL_FLAGS_MASK
) >> 24) | 0x80;
711 hdr
->exp_cmdsn
= cpu_to_be32(psol
->dw
[offsetof(struct amap_sol_cqe
,
712 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
);
713 hdr
->max_cmdsn
= be32_to_cpu((psol
->dw
[offsetof(struct amap_sol_cqe
,
714 i_exp_cmd_sn
) / 32] & SOL_EXP_CMD_SN_MASK
) +
715 ((psol
->dw
[offsetof(struct amap_sol_cqe
, i_cmd_wnd
)
716 / 32] & SOL_CMD_WND_MASK
) >> 24) - 1);
717 hdr
->opcode
= ISCSI_OP_NOOP_IN
;
718 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, NULL
, 0);
721 static void hwi_complete_cmd(struct beiscsi_conn
*beiscsi_conn
,
722 struct beiscsi_hba
*phba
, struct sol_cqe
*psol
)
724 struct hwi_wrb_context
*pwrb_context
;
725 struct wrb_handle
*pwrb_handle
;
726 struct iscsi_wrb
*pwrb
= NULL
;
727 struct hwi_controller
*phwi_ctrlr
;
728 struct iscsi_task
*task
;
729 struct beiscsi_io_task
*io_task
;
730 struct iscsi_conn
*conn
= beiscsi_conn
->conn
;
731 struct iscsi_session
*session
= conn
->session
;
733 phwi_ctrlr
= phba
->phwi_ctrlr
;
735 pwrb_context
= &phwi_ctrlr
->
736 wrb_context
[((psol
->dw
[offsetof(struct amap_sol_cqe
, cid
) / 32]
737 & SOL_CID_MASK
) >> 6)];
738 pwrb_handle
= pwrb_context
->pwrb_handle_basestd
[((psol
->
739 dw
[offsetof(struct amap_sol_cqe
, wrb_index
) /
740 32] & SOL_WRB_INDEX_MASK
) >> 16)];
742 task
= pwrb_handle
->pio_handle
;
743 io_task
= task
->dd_data
;
744 spin_lock_bh(&session
->lock
);
745 pwrb
= pwrb_handle
->pwrb
;
746 switch ((pwrb
->dw
[offsetof(struct amap_iscsi_wrb
, type
) / 32] &
747 WRB_TYPE_MASK
) >> 28) {
750 if ((task
->hdr
->opcode
& ISCSI_OPCODE_MASK
) ==
752 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
754 be_complete_io(beiscsi_conn
, task
, psol
);
757 case HWH_TYPE_LOGOUT
:
758 be_complete_logout(beiscsi_conn
, task
, psol
);
763 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
764 "- Solicited path \n");
768 be_complete_tmf(beiscsi_conn
, task
, psol
);
772 be_complete_nopin_resp(beiscsi_conn
, task
, psol
);
776 shost_printk(KERN_WARNING
, phba
->shost
,
777 "wrb_index 0x%x CID 0x%x\n",
778 ((psol
->dw
[offsetof(struct amap_iscsi_wrb
, type
) /
779 32] & SOL_WRB_INDEX_MASK
) >> 16),
780 ((psol
->dw
[offsetof(struct amap_sol_cqe
, cid
) / 32]
781 & SOL_CID_MASK
) >> 6));
785 spin_unlock_bh(&session
->lock
);
788 static struct list_head
*hwi_get_async_busy_list(struct hwi_async_pdu_context
789 *pasync_ctx
, unsigned int is_header
,
790 unsigned int host_write_ptr
)
793 return &pasync_ctx
->async_entry
[host_write_ptr
].
796 return &pasync_ctx
->async_entry
[host_write_ptr
].data_busy_list
;
799 static struct async_pdu_handle
*
800 hwi_get_async_handle(struct beiscsi_hba
*phba
,
801 struct beiscsi_conn
*beiscsi_conn
,
802 struct hwi_async_pdu_context
*pasync_ctx
,
803 struct i_t_dpdu_cqe
*pdpdu_cqe
, unsigned int *pcq_index
)
805 struct be_bus_address phys_addr
;
806 struct list_head
*pbusy_list
;
807 struct async_pdu_handle
*pasync_handle
= NULL
;
809 unsigned char buffer_index
= -1;
810 unsigned char is_header
= 0;
812 phys_addr
.u
.a32
.address_lo
=
813 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_lo
) / 32] -
814 ((pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
815 & PDUCQE_DPL_MASK
) >> 16);
816 phys_addr
.u
.a32
.address_hi
=
817 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, db_addr_hi
) / 32];
819 phys_addr
.u
.a64
.address
=
820 *((unsigned long long *)(&phys_addr
.u
.a64
.address
));
822 switch (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
, code
) / 32]
823 & PDUCQE_CODE_MASK
) {
824 case UNSOL_HDR_NOTIFY
:
827 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 1,
828 (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
829 index
) / 32] & PDUCQE_INDEX_MASK
));
831 buffer_len
= (unsigned int)(phys_addr
.u
.a64
.address
-
832 pasync_ctx
->async_header
.pa_base
.u
.a64
.address
);
834 buffer_index
= buffer_len
/
835 pasync_ctx
->async_header
.buffer_size
;
838 case UNSOL_DATA_NOTIFY
:
839 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, 0, (pdpdu_cqe
->
840 dw
[offsetof(struct amap_i_t_dpdu_cqe
,
841 index
) / 32] & PDUCQE_INDEX_MASK
));
842 buffer_len
= (unsigned long)(phys_addr
.u
.a64
.address
-
843 pasync_ctx
->async_data
.pa_base
.u
.
845 buffer_index
= buffer_len
/ pasync_ctx
->async_data
.buffer_size
;
849 shost_printk(KERN_WARNING
, phba
->shost
,
850 "Unexpected code=%d \n",
851 pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
852 code
) / 32] & PDUCQE_CODE_MASK
);
856 WARN_ON(!(buffer_index
<= pasync_ctx
->async_data
.num_entries
));
857 WARN_ON(list_empty(pbusy_list
));
858 list_for_each_entry(pasync_handle
, pbusy_list
, link
) {
859 WARN_ON(pasync_handle
->consumed
);
860 if (pasync_handle
->index
== buffer_index
)
864 WARN_ON(!pasync_handle
);
866 pasync_handle
->cri
= (unsigned short)beiscsi_conn
->beiscsi_conn_cid
;
867 pasync_handle
->is_header
= is_header
;
868 pasync_handle
->buffer_len
= ((pdpdu_cqe
->
869 dw
[offsetof(struct amap_i_t_dpdu_cqe
, dpl
) / 32]
870 & PDUCQE_DPL_MASK
) >> 16);
872 *pcq_index
= (pdpdu_cqe
->dw
[offsetof(struct amap_i_t_dpdu_cqe
,
873 index
) / 32] & PDUCQE_INDEX_MASK
);
874 return pasync_handle
;
878 hwi_update_async_writables(struct hwi_async_pdu_context
*pasync_ctx
,
879 unsigned int is_header
, unsigned int cq_index
)
881 struct list_head
*pbusy_list
;
882 struct async_pdu_handle
*pasync_handle
;
883 unsigned int num_entries
, writables
= 0;
884 unsigned int *pep_read_ptr
, *pwritables
;
888 pep_read_ptr
= &pasync_ctx
->async_header
.ep_read_ptr
;
889 pwritables
= &pasync_ctx
->async_header
.writables
;
890 num_entries
= pasync_ctx
->async_header
.num_entries
;
892 pep_read_ptr
= &pasync_ctx
->async_data
.ep_read_ptr
;
893 pwritables
= &pasync_ctx
->async_data
.writables
;
894 num_entries
= pasync_ctx
->async_data
.num_entries
;
897 while ((*pep_read_ptr
) != cq_index
) {
899 *pep_read_ptr
= (*pep_read_ptr
) % num_entries
;
901 pbusy_list
= hwi_get_async_busy_list(pasync_ctx
, is_header
,
904 WARN_ON(list_empty(pbusy_list
));
906 if (!list_empty(pbusy_list
)) {
907 pasync_handle
= list_entry(pbusy_list
->next
,
908 struct async_pdu_handle
,
910 WARN_ON(!pasync_handle
);
911 pasync_handle
->consumed
= 1;
919 "Duplicate notification received - index 0x%x!!\n",
924 *pwritables
= *pwritables
+ writables
;
928 static unsigned int hwi_free_async_msg(struct beiscsi_hba
*phba
,
931 struct hwi_controller
*phwi_ctrlr
;
932 struct hwi_async_pdu_context
*pasync_ctx
;
933 struct async_pdu_handle
*pasync_handle
, *tmp_handle
;
934 struct list_head
*plist
;
937 phwi_ctrlr
= phba
->phwi_ctrlr
;
938 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
940 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
942 list_for_each_entry_safe(pasync_handle
, tmp_handle
, plist
, link
) {
943 list_del(&pasync_handle
->link
);
946 list_add_tail(&pasync_handle
->link
,
947 &pasync_ctx
->async_header
.free_list
);
948 pasync_ctx
->async_header
.free_entries
++;
951 list_add_tail(&pasync_handle
->link
,
952 &pasync_ctx
->async_data
.free_list
);
953 pasync_ctx
->async_data
.free_entries
++;
958 INIT_LIST_HEAD(&pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
959 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 0;
960 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
964 static struct phys_addr
*
965 hwi_get_ring_address(struct hwi_async_pdu_context
*pasync_ctx
,
966 unsigned int is_header
, unsigned int host_write_ptr
)
968 struct phys_addr
*pasync_sge
= NULL
;
971 pasync_sge
= pasync_ctx
->async_header
.ring_base
;
973 pasync_sge
= pasync_ctx
->async_data
.ring_base
;
975 return pasync_sge
+ host_write_ptr
;
978 static void hwi_post_async_buffers(struct beiscsi_hba
*phba
,
979 unsigned int is_header
)
981 struct hwi_controller
*phwi_ctrlr
;
982 struct hwi_async_pdu_context
*pasync_ctx
;
983 struct async_pdu_handle
*pasync_handle
;
984 struct list_head
*pfree_link
, *pbusy_list
;
985 struct phys_addr
*pasync_sge
;
986 unsigned int ring_id
, num_entries
;
987 unsigned int host_write_num
;
988 unsigned int writables
;
992 phwi_ctrlr
= phba
->phwi_ctrlr
;
993 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
996 num_entries
= pasync_ctx
->async_header
.num_entries
;
997 writables
= min(pasync_ctx
->async_header
.writables
,
998 pasync_ctx
->async_header
.free_entries
);
999 pfree_link
= pasync_ctx
->async_header
.free_list
.next
;
1000 host_write_num
= pasync_ctx
->async_header
.host_write_ptr
;
1001 ring_id
= phwi_ctrlr
->default_pdu_hdr
.id
;
1003 num_entries
= pasync_ctx
->async_data
.num_entries
;
1004 writables
= min(pasync_ctx
->async_data
.writables
,
1005 pasync_ctx
->async_data
.free_entries
);
1006 pfree_link
= pasync_ctx
->async_data
.free_list
.next
;
1007 host_write_num
= pasync_ctx
->async_data
.host_write_ptr
;
1008 ring_id
= phwi_ctrlr
->default_pdu_data
.id
;
1011 writables
= (writables
/ 8) * 8;
1013 for (i
= 0; i
< writables
; i
++) {
1015 hwi_get_async_busy_list(pasync_ctx
, is_header
,
1018 list_entry(pfree_link
, struct async_pdu_handle
,
1020 WARN_ON(!pasync_handle
);
1021 pasync_handle
->consumed
= 0;
1023 pfree_link
= pfree_link
->next
;
1025 pasync_sge
= hwi_get_ring_address(pasync_ctx
,
1026 is_header
, host_write_num
);
1028 pasync_sge
->hi
= pasync_handle
->pa
.u
.a32
.address_lo
;
1029 pasync_sge
->lo
= pasync_handle
->pa
.u
.a32
.address_hi
;
1031 list_move(&pasync_handle
->link
, pbusy_list
);
1034 host_write_num
= host_write_num
% num_entries
;
1038 pasync_ctx
->async_header
.host_write_ptr
=
1040 pasync_ctx
->async_header
.free_entries
-= writables
;
1041 pasync_ctx
->async_header
.writables
-= writables
;
1042 pasync_ctx
->async_header
.busy_entries
+= writables
;
1044 pasync_ctx
->async_data
.host_write_ptr
= host_write_num
;
1045 pasync_ctx
->async_data
.free_entries
-= writables
;
1046 pasync_ctx
->async_data
.writables
-= writables
;
1047 pasync_ctx
->async_data
.busy_entries
+= writables
;
1050 doorbell
|= ring_id
& DB_DEF_PDU_RING_ID_MASK
;
1051 doorbell
|= 1 << DB_DEF_PDU_REARM_SHIFT
;
1052 doorbell
|= 0 << DB_DEF_PDU_EVENT_SHIFT
;
1053 doorbell
|= (writables
& DB_DEF_PDU_CQPROC_MASK
)
1054 << DB_DEF_PDU_CQPROC_SHIFT
;
1056 iowrite32(doorbell
, phba
->db_va
+ DB_RXULP0_OFFSET
);
1060 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba
*phba
,
1061 struct beiscsi_conn
*beiscsi_conn
,
1062 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1064 struct hwi_controller
*phwi_ctrlr
;
1065 struct hwi_async_pdu_context
*pasync_ctx
;
1066 struct async_pdu_handle
*pasync_handle
= NULL
;
1067 unsigned int cq_index
= -1;
1069 phwi_ctrlr
= phba
->phwi_ctrlr
;
1070 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1072 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1073 pdpdu_cqe
, &cq_index
);
1074 BUG_ON(pasync_handle
->is_header
!= 0);
1075 if (pasync_handle
->consumed
== 0)
1076 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1079 hwi_free_async_msg(phba
, pasync_handle
->cri
);
1080 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1084 hwi_fwd_async_msg(struct beiscsi_conn
*beiscsi_conn
,
1085 struct beiscsi_hba
*phba
,
1086 struct hwi_async_pdu_context
*pasync_ctx
, unsigned short cri
)
1088 struct list_head
*plist
;
1089 struct async_pdu_handle
*pasync_handle
;
1091 unsigned int hdr_len
= 0, buf_len
= 0;
1092 unsigned int status
, index
= 0, offset
= 0;
1093 void *pfirst_buffer
= NULL
;
1094 unsigned int num_buf
= 0;
1096 plist
= &pasync_ctx
->async_entry
[cri
].wait_queue
.list
;
1098 list_for_each_entry(pasync_handle
, plist
, link
) {
1100 phdr
= pasync_handle
->pbuffer
;
1101 hdr_len
= pasync_handle
->buffer_len
;
1103 buf_len
= pasync_handle
->buffer_len
;
1105 pfirst_buffer
= pasync_handle
->pbuffer
;
1108 memcpy(pfirst_buffer
+ offset
,
1109 pasync_handle
->pbuffer
, buf_len
);
1115 status
= beiscsi_process_async_pdu(beiscsi_conn
, phba
,
1116 beiscsi_conn
->beiscsi_conn_cid
,
1117 phdr
, hdr_len
, pfirst_buffer
,
1121 hwi_free_async_msg(phba
, cri
);
1126 hwi_gather_async_pdu(struct beiscsi_conn
*beiscsi_conn
,
1127 struct beiscsi_hba
*phba
,
1128 struct async_pdu_handle
*pasync_handle
)
1130 struct hwi_async_pdu_context
*pasync_ctx
;
1131 struct hwi_controller
*phwi_ctrlr
;
1132 unsigned int bytes_needed
= 0, status
= 0;
1133 unsigned short cri
= pasync_handle
->cri
;
1134 struct pdu_base
*ppdu
;
1136 phwi_ctrlr
= phba
->phwi_ctrlr
;
1137 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1139 list_del(&pasync_handle
->link
);
1140 if (pasync_handle
->is_header
) {
1141 pasync_ctx
->async_header
.busy_entries
--;
1142 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1143 hwi_free_async_msg(phba
, cri
);
1147 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_received
= 0;
1148 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
= 1;
1149 pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_len
=
1150 (unsigned short)pasync_handle
->buffer_len
;
1151 list_add_tail(&pasync_handle
->link
,
1152 &pasync_ctx
->async_entry
[cri
].wait_queue
.list
);
1154 ppdu
= pasync_handle
->pbuffer
;
1155 bytes_needed
= ((((ppdu
->dw
[offsetof(struct amap_pdu_base
,
1156 data_len_hi
) / 32] & PDUBASE_DATALENHI_MASK
) << 8) &
1157 0xFFFF0000) | ((be16_to_cpu((ppdu
->
1158 dw
[offsetof(struct amap_pdu_base
, data_len_lo
) / 32]
1159 & PDUBASE_DATALENLO_MASK
) >> 16)) & 0x0000FFFF));
1162 pasync_ctx
->async_entry
[cri
].wait_queue
.bytes_needed
=
1165 if (bytes_needed
== 0)
1166 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1170 pasync_ctx
->async_data
.busy_entries
--;
1171 if (pasync_ctx
->async_entry
[cri
].wait_queue
.hdr_received
) {
1172 list_add_tail(&pasync_handle
->link
,
1173 &pasync_ctx
->async_entry
[cri
].wait_queue
.
1175 pasync_ctx
->async_entry
[cri
].wait_queue
.
1177 (unsigned short)pasync_handle
->buffer_len
;
1179 if (pasync_ctx
->async_entry
[cri
].wait_queue
.
1181 pasync_ctx
->async_entry
[cri
].wait_queue
.
1183 status
= hwi_fwd_async_msg(beiscsi_conn
, phba
,
1190 static void hwi_process_default_pdu_ring(struct beiscsi_conn
*beiscsi_conn
,
1191 struct beiscsi_hba
*phba
,
1192 struct i_t_dpdu_cqe
*pdpdu_cqe
)
1194 struct hwi_controller
*phwi_ctrlr
;
1195 struct hwi_async_pdu_context
*pasync_ctx
;
1196 struct async_pdu_handle
*pasync_handle
= NULL
;
1197 unsigned int cq_index
= -1;
1199 phwi_ctrlr
= phba
->phwi_ctrlr
;
1200 pasync_ctx
= HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr
);
1201 pasync_handle
= hwi_get_async_handle(phba
, beiscsi_conn
, pasync_ctx
,
1202 pdpdu_cqe
, &cq_index
);
1204 if (pasync_handle
->consumed
== 0)
1205 hwi_update_async_writables(pasync_ctx
, pasync_handle
->is_header
,
1207 hwi_gather_async_pdu(beiscsi_conn
, phba
, pasync_handle
);
1208 hwi_post_async_buffers(phba
, pasync_handle
->is_header
);
1211 static unsigned int beiscsi_process_cq(struct beiscsi_hba
*phba
)
1213 struct hwi_controller
*phwi_ctrlr
;
1214 struct hwi_context_memory
*phwi_context
;
1215 struct be_queue_info
*cq
;
1216 struct sol_cqe
*sol
;
1217 struct dmsg_cqe
*dmsg
;
1218 unsigned int num_processed
= 0;
1219 unsigned int tot_nump
= 0;
1220 struct beiscsi_conn
*beiscsi_conn
;
1222 phwi_ctrlr
= phba
->phwi_ctrlr
;
1223 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
1224 cq
= &phwi_context
->be_cq
;
1225 sol
= queue_tail_node(cq
);
1227 while (sol
->dw
[offsetof(struct amap_sol_cqe
, valid
) / 32] &
1229 be_dws_le_to_cpu(sol
, sizeof(struct sol_cqe
));
1231 beiscsi_conn
= phba
->conn_table
[(u32
) (sol
->
1232 dw
[offsetof(struct amap_sol_cqe
, cid
) / 32] &
1233 SOL_CID_MASK
) >> 6];
1235 if (!beiscsi_conn
|| !beiscsi_conn
->ep
) {
1236 shost_printk(KERN_WARNING
, phba
->shost
,
1237 "Connection table empty for cid = %d\n",
1238 (u32
)(sol
->dw
[offsetof(struct amap_sol_cqe
,
1239 cid
) / 32] & SOL_CID_MASK
) >> 6);
1243 if (num_processed
>= 32) {
1244 hwi_ring_cq_db(phba
, phwi_context
->be_cq
.id
,
1245 num_processed
, 0, 0);
1246 tot_nump
+= num_processed
;
1250 switch ((u32
) sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1251 32] & CQE_CODE_MASK
) {
1252 case SOL_CMD_COMPLETE
:
1253 hwi_complete_cmd(beiscsi_conn
, phba
, sol
);
1255 case DRIVERMSG_NOTIFY
:
1256 SE_DEBUG(DBG_LVL_8
, "Received DRIVERMSG_NOTIFY \n");
1257 dmsg
= (struct dmsg_cqe
*)sol
;
1258 hwi_complete_drvr_msgs(beiscsi_conn
, phba
, sol
);
1260 case UNSOL_HDR_NOTIFY
:
1261 case UNSOL_DATA_NOTIFY
:
1262 SE_DEBUG(DBG_LVL_8
, "Received UNSOL_HDR/DATA_NOTIFY\n");
1263 hwi_process_default_pdu_ring(beiscsi_conn
, phba
,
1264 (struct i_t_dpdu_cqe
*)sol
);
1266 case CXN_INVALIDATE_INDEX_NOTIFY
:
1267 case CMD_INVALIDATED_NOTIFY
:
1268 case CXN_INVALIDATE_NOTIFY
:
1270 "Ignoring CQ Error notification for cmd/cxn"
1273 case SOL_CMD_KILLED_DATA_DIGEST_ERR
:
1274 case CMD_KILLED_INVALID_STATSN_RCVD
:
1275 case CMD_KILLED_INVALID_R2T_RCVD
:
1276 case CMD_CXN_KILLED_LUN_INVALID
:
1277 case CMD_CXN_KILLED_ICD_INVALID
:
1278 case CMD_CXN_KILLED_ITT_INVALID
:
1279 case CMD_CXN_KILLED_SEQ_OUTOFORDER
:
1280 case CMD_CXN_KILLED_INVALID_DATASN_RCVD
:
1282 "CQ Error notification for cmd.. "
1283 "code %d cid 0x%x\n",
1284 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1285 32] & CQE_CODE_MASK
,
1286 (sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1287 32] & SOL_CID_MASK
));
1289 case UNSOL_DATA_DIGEST_ERROR_NOTIFY
:
1291 "Digest error on def pdu ring, dropping..\n");
1292 hwi_flush_default_pdu_buffer(phba
, beiscsi_conn
,
1293 (struct i_t_dpdu_cqe
*) sol
);
1295 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL
:
1296 case CXN_KILLED_BURST_LEN_MISMATCH
:
1297 case CXN_KILLED_AHS_RCVD
:
1298 case CXN_KILLED_HDR_DIGEST_ERR
:
1299 case CXN_KILLED_UNKNOWN_HDR
:
1300 case CXN_KILLED_STALE_ITT_TTT_RCVD
:
1301 case CXN_KILLED_INVALID_ITT_TTT_RCVD
:
1302 case CXN_KILLED_TIMED_OUT
:
1303 case CXN_KILLED_FIN_RCVD
:
1304 case CXN_KILLED_BAD_UNSOL_PDU_RCVD
:
1305 case CXN_KILLED_BAD_WRB_INDEX_ERROR
:
1306 case CXN_KILLED_OVER_RUN_RESIDUAL
:
1307 case CXN_KILLED_UNDER_RUN_RESIDUAL
:
1308 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN
:
1309 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, resetting CID "
1311 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1312 32] & CQE_CODE_MASK
,
1313 sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1314 32] & CQE_CID_MASK
);
1315 iscsi_conn_failure(beiscsi_conn
->conn
,
1316 ISCSI_ERR_CONN_FAILED
);
1318 case CXN_KILLED_RST_SENT
:
1319 case CXN_KILLED_RST_RCVD
:
1320 SE_DEBUG(DBG_LVL_1
, "CQ Error %d, reset received/sent "
1322 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1323 32] & CQE_CODE_MASK
,
1324 sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1325 32] & CQE_CID_MASK
);
1326 iscsi_conn_failure(beiscsi_conn
->conn
,
1327 ISCSI_ERR_CONN_FAILED
);
1330 SE_DEBUG(DBG_LVL_1
, "CQ Error Invalid code= %d "
1331 "received on CID 0x%x...\n",
1332 sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1333 32] & CQE_CODE_MASK
,
1334 sol
->dw
[offsetof(struct amap_sol_cqe
, cid
) /
1335 32] & CQE_CID_MASK
);
1339 AMAP_SET_BITS(struct amap_sol_cqe
, valid
, sol
, 0);
1341 sol
= queue_tail_node(cq
);
1345 if (num_processed
> 0) {
1346 tot_nump
+= num_processed
;
1347 hwi_ring_cq_db(phba
, phwi_context
->be_cq
.id
, num_processed
,
1353 static void beiscsi_process_all_cqs(struct work_struct
*work
)
1355 unsigned long flags
;
1356 struct beiscsi_hba
*phba
=
1357 container_of(work
, struct beiscsi_hba
, work_cqs
);
1359 if (phba
->todo_mcc_cq
) {
1360 spin_lock_irqsave(&phba
->isr_lock
, flags
);
1361 phba
->todo_mcc_cq
= 0;
1362 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
1363 SE_DEBUG(DBG_LVL_1
, "MCC Interrupt Not expected \n");
1366 if (phba
->todo_cq
) {
1367 spin_lock_irqsave(&phba
->isr_lock
, flags
);
1369 spin_unlock_irqrestore(&phba
->isr_lock
, flags
);
1370 beiscsi_process_cq(phba
);
1374 static int be_iopoll(struct blk_iopoll
*iop
, int budget
)
1376 static unsigned int ret
;
1377 struct beiscsi_hba
*phba
;
1379 phba
= container_of(iop
, struct beiscsi_hba
, iopoll
);
1381 ret
= beiscsi_process_cq(phba
);
1383 struct hwi_controller
*phwi_ctrlr
;
1384 struct hwi_context_memory
*phwi_context
;
1386 phwi_ctrlr
= phba
->phwi_ctrlr
;
1387 phwi_context
= phwi_ctrlr
->phwi_ctxt
;
1388 blk_iopoll_complete(iop
);
1389 hwi_ring_eq_db(phba
, phwi_context
->be_eq
.q
.id
, 0,
1396 hwi_write_sgl(struct iscsi_wrb
*pwrb
, struct scatterlist
*sg
,
1397 unsigned int num_sg
, struct beiscsi_io_task
*io_task
)
1399 struct iscsi_sge
*psgl
;
1400 unsigned short sg_len
, index
;
1401 unsigned int sge_len
= 0;
1402 unsigned long long addr
;
1403 struct scatterlist
*l_sg
;
1404 unsigned int offset
;
1406 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
1407 io_task
->bhs_pa
.u
.a32
.address_lo
);
1408 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
1409 io_task
->bhs_pa
.u
.a32
.address_hi
);
1412 for (index
= 0; (index
< num_sg
) && (index
< 2); index
++, sg_next(sg
)) {
1414 sg_len
= sg_dma_len(sg
);
1415 addr
= (u64
) sg_dma_address(sg
);
1416 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
1417 (addr
& 0xFFFFFFFF));
1418 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
1420 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
1423 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
1426 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
,
1428 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_r2t_offset
,
1430 sg_len
= sg_dma_len(sg
);
1431 addr
= (u64
) sg_dma_address(sg
);
1432 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_lo
, pwrb
,
1433 (addr
& 0xFFFFFFFF));
1434 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_addr_hi
, pwrb
,
1436 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_len
, pwrb
,
1440 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
1441 memset(psgl
, 0, sizeof(*psgl
) * BE2_SGE
);
1443 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
- 2);
1445 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1446 io_task
->bhs_pa
.u
.a32
.address_hi
);
1447 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1448 io_task
->bhs_pa
.u
.a32
.address_lo
);
1451 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge1_last
, pwrb
, 1);
1456 for (index
= 0; index
< num_sg
; index
++, sg_next(sg
), psgl
++) {
1457 sg_len
= sg_dma_len(sg
);
1458 addr
= (u64
) sg_dma_address(sg
);
1459 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1460 (addr
& 0xFFFFFFFF));
1461 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1463 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, sg_len
);
1464 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, offset
);
1465 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
1469 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
1472 static void hwi_write_buffer(struct iscsi_wrb
*pwrb
, struct iscsi_task
*task
)
1474 struct iscsi_sge
*psgl
;
1475 unsigned long long addr
;
1476 struct beiscsi_io_task
*io_task
= task
->dd_data
;
1477 struct beiscsi_conn
*beiscsi_conn
= io_task
->conn
;
1478 struct beiscsi_hba
*phba
= beiscsi_conn
->phba
;
1480 io_task
->bhs_len
= sizeof(struct be_nonio_bhs
) - 2;
1481 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_lo
, pwrb
,
1482 io_task
->bhs_pa
.u
.a32
.address_lo
);
1483 AMAP_SET_BITS(struct amap_iscsi_wrb
, iscsi_bhs_addr_hi
, pwrb
,
1484 io_task
->bhs_pa
.u
.a32
.address_hi
);
1487 if (task
->data_count
) {
1488 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 1);
1489 addr
= (u64
) pci_map_single(phba
->pcidev
,
1491 task
->data_count
, 1);
1493 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
1496 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_lo
, pwrb
,
1497 (addr
& 0xFFFFFFFF));
1498 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_addr_hi
, pwrb
,
1500 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_len
, pwrb
,
1503 AMAP_SET_BITS(struct amap_iscsi_wrb
, sge0_last
, pwrb
, 1);
1505 AMAP_SET_BITS(struct amap_iscsi_wrb
, dsp
, pwrb
, 0);
1509 psgl
= (struct iscsi_sge
*)io_task
->psgl_handle
->pfrag
;
1511 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, io_task
->bhs_len
);
1513 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1514 io_task
->bhs_pa
.u
.a32
.address_hi
);
1515 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1516 io_task
->bhs_pa
.u
.a32
.address_lo
);
1519 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
, 0);
1520 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
, 0);
1521 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0);
1522 AMAP_SET_BITS(struct amap_iscsi_sge
, sge_offset
, psgl
, 0);
1523 AMAP_SET_BITS(struct amap_iscsi_sge
, rsvd0
, psgl
, 0);
1524 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 0);
1528 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_lo
, psgl
,
1529 (addr
& 0xFFFFFFFF));
1530 AMAP_SET_BITS(struct amap_iscsi_sge
, addr_hi
, psgl
,
1533 AMAP_SET_BITS(struct amap_iscsi_sge
, len
, psgl
, 0x106);
1535 AMAP_SET_BITS(struct amap_iscsi_sge
, last_sge
, psgl
, 1);
1538 static void beiscsi_find_mem_req(struct beiscsi_hba
*phba
)
1540 unsigned int num_cq_pages
, num_eq_pages
, num_async_pdu_buf_pages
;
1541 unsigned int num_async_pdu_data_pages
, wrb_sz_per_cxn
;
1542 unsigned int num_async_pdu_buf_sgl_pages
, num_async_pdu_data_sgl_pages
;
1544 num_cq_pages
= PAGES_REQUIRED(phba
->params
.num_cq_entries
* \
1545 sizeof(struct sol_cqe
));
1546 num_eq_pages
= PAGES_REQUIRED(phba
->params
.num_eq_entries
* \
1547 sizeof(struct be_eq_entry
));
1548 num_async_pdu_buf_pages
=
1549 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1550 phba
->params
.defpdu_hdr_sz
);
1551 num_async_pdu_buf_sgl_pages
=
1552 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1553 sizeof(struct phys_addr
));
1554 num_async_pdu_data_pages
=
1555 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1556 phba
->params
.defpdu_data_sz
);
1557 num_async_pdu_data_sgl_pages
=
1558 PAGES_REQUIRED(phba
->params
.asyncpdus_per_ctrl
* \
1559 sizeof(struct phys_addr
));
1561 phba
->params
.hwi_ws_sz
= sizeof(struct hwi_controller
);
1563 phba
->mem_req
[ISCSI_MEM_GLOBAL_HEADER
] = 2 *
1564 BE_ISCSI_PDU_HEADER_SIZE
;
1565 phba
->mem_req
[HWI_MEM_ADDN_CONTEXT
] =
1566 sizeof(struct hwi_context_memory
);
1568 phba
->mem_req
[HWI_MEM_CQ
] = num_cq_pages
* PAGE_SIZE
;
1569 phba
->mem_req
[HWI_MEM_EQ
] = num_eq_pages
* PAGE_SIZE
;
1571 phba
->mem_req
[HWI_MEM_WRB
] = sizeof(struct iscsi_wrb
)
1572 * (phba
->params
.wrbs_per_cxn
)
1573 * phba
->params
.cxns_per_ctrl
;
1574 wrb_sz_per_cxn
= sizeof(struct wrb_handle
) *
1575 (phba
->params
.wrbs_per_cxn
);
1576 phba
->mem_req
[HWI_MEM_WRBH
] = roundup_pow_of_two((wrb_sz_per_cxn
) *
1577 phba
->params
.cxns_per_ctrl
);
1579 phba
->mem_req
[HWI_MEM_SGLH
] = sizeof(struct sgl_handle
) *
1580 phba
->params
.icds_per_ctrl
;
1581 phba
->mem_req
[HWI_MEM_SGE
] = sizeof(struct iscsi_sge
) *
1582 phba
->params
.num_sge_per_io
* phba
->params
.icds_per_ctrl
;
1584 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_BUF
] =
1585 num_async_pdu_buf_pages
* PAGE_SIZE
;
1586 phba
->mem_req
[HWI_MEM_ASYNC_DATA_BUF
] =
1587 num_async_pdu_data_pages
* PAGE_SIZE
;
1588 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_RING
] =
1589 num_async_pdu_buf_sgl_pages
* PAGE_SIZE
;
1590 phba
->mem_req
[HWI_MEM_ASYNC_DATA_RING
] =
1591 num_async_pdu_data_sgl_pages
* PAGE_SIZE
;
1592 phba
->mem_req
[HWI_MEM_ASYNC_HEADER_HANDLE
] =
1593 phba
->params
.asyncpdus_per_ctrl
*
1594 sizeof(struct async_pdu_handle
);
1595 phba
->mem_req
[HWI_MEM_ASYNC_DATA_HANDLE
] =
1596 phba
->params
.asyncpdus_per_ctrl
*
1597 sizeof(struct async_pdu_handle
);
1598 phba
->mem_req
[HWI_MEM_ASYNC_PDU_CONTEXT
] =
1599 sizeof(struct hwi_async_pdu_context
) +
1600 (phba
->params
.cxns_per_ctrl
* sizeof(struct hwi_async_entry
));
1603 static int beiscsi_alloc_mem(struct beiscsi_hba
*phba
)
1605 struct be_mem_descriptor
*mem_descr
;
1607 struct mem_array
*mem_arr
, *mem_arr_orig
;
1608 unsigned int i
, j
, alloc_size
, curr_alloc_size
;
1610 phba
->phwi_ctrlr
= kmalloc(phba
->params
.hwi_ws_sz
, GFP_KERNEL
);
1611 if (!phba
->phwi_ctrlr
)
1614 phba
->init_mem
= kcalloc(SE_MEM_MAX
, sizeof(*mem_descr
),
1616 if (!phba
->init_mem
) {
1617 kfree(phba
->phwi_ctrlr
);
1621 mem_arr_orig
= kmalloc(sizeof(*mem_arr_orig
) * BEISCSI_MAX_FRAGS_INIT
,
1623 if (!mem_arr_orig
) {
1624 kfree(phba
->init_mem
);
1625 kfree(phba
->phwi_ctrlr
);
1629 mem_descr
= phba
->init_mem
;
1630 for (i
= 0; i
< SE_MEM_MAX
; i
++) {
1632 mem_arr
= mem_arr_orig
;
1633 alloc_size
= phba
->mem_req
[i
];
1634 memset(mem_arr
, 0, sizeof(struct mem_array
) *
1635 BEISCSI_MAX_FRAGS_INIT
);
1636 curr_alloc_size
= min(be_max_phys_size
* 1024, alloc_size
);
1638 mem_arr
->virtual_address
= pci_alloc_consistent(
1642 if (!mem_arr
->virtual_address
) {
1643 if (curr_alloc_size
<= BE_MIN_MEM_SIZE
)
1645 if (curr_alloc_size
-
1646 rounddown_pow_of_two(curr_alloc_size
))
1647 curr_alloc_size
= rounddown_pow_of_two
1650 curr_alloc_size
= curr_alloc_size
/ 2;
1652 mem_arr
->bus_address
.u
.
1653 a64
.address
= (__u64
) bus_add
;
1654 mem_arr
->size
= curr_alloc_size
;
1655 alloc_size
-= curr_alloc_size
;
1656 curr_alloc_size
= min(be_max_phys_size
*
1661 } while (alloc_size
);
1662 mem_descr
->num_elements
= j
;
1663 mem_descr
->size_in_bytes
= phba
->mem_req
[i
];
1664 mem_descr
->mem_array
= kmalloc(sizeof(*mem_arr
) * j
,
1666 if (!mem_descr
->mem_array
)
1669 memcpy(mem_descr
->mem_array
, mem_arr_orig
,
1670 sizeof(struct mem_array
) * j
);
1673 kfree(mem_arr_orig
);
1676 mem_descr
->num_elements
= j
;
1677 while ((i
) || (j
)) {
1678 for (j
= mem_descr
->num_elements
; j
> 0; j
--) {
1679 pci_free_consistent(phba
->pcidev
,
1680 mem_descr
->mem_array
[j
- 1].size
,
1681 mem_descr
->mem_array
[j
- 1].
1683 mem_descr
->mem_array
[j
- 1].
1684 bus_address
.u
.a64
.address
);
1688 kfree(mem_descr
->mem_array
);
1692 kfree(mem_arr_orig
);
1693 kfree(phba
->init_mem
);
1694 kfree(phba
->phwi_ctrlr
);
1698 static int beiscsi_get_memory(struct beiscsi_hba
*phba
)
1700 beiscsi_find_mem_req(phba
);
1701 return beiscsi_alloc_mem(phba
);
1704 static void iscsi_init_global_templates(struct beiscsi_hba
*phba
)
1706 struct pdu_data_out
*pdata_out
;
1707 struct pdu_nop_out
*pnop_out
;
1708 struct be_mem_descriptor
*mem_descr
;
1710 mem_descr
= phba
->init_mem
;
1711 mem_descr
+= ISCSI_MEM_GLOBAL_HEADER
;
1713 (struct pdu_data_out
*)mem_descr
->mem_array
[0].virtual_address
;
1714 memset(pdata_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
1716 AMAP_SET_BITS(struct amap_pdu_data_out
, opcode
, pdata_out
,
1720 (struct pdu_nop_out
*)((unsigned char *)mem_descr
->mem_array
[0].
1721 virtual_address
+ BE_ISCSI_PDU_HEADER_SIZE
);
1723 memset(pnop_out
, 0, BE_ISCSI_PDU_HEADER_SIZE
);
1724 AMAP_SET_BITS(struct amap_pdu_nop_out
, ttt
, pnop_out
, 0xFFFFFFFF);
1725 AMAP_SET_BITS(struct amap_pdu_nop_out
, f_bit
, pnop_out
, 1);
1726 AMAP_SET_BITS(struct amap_pdu_nop_out
, i_bit
, pnop_out
, 0);
1729 static void beiscsi_init_wrb_handle(struct beiscsi_hba
*phba
)
1731 struct be_mem_descriptor
*mem_descr_wrbh
, *mem_descr_wrb
;
1732 struct wrb_handle
*pwrb_handle
;
1733 struct hwi_controller
*phwi_ctrlr
;
1734 struct hwi_wrb_context
*pwrb_context
;
1735 struct iscsi_wrb
*pwrb
;
1736 unsigned int num_cxn_wrbh
;
1737 unsigned int num_cxn_wrb
, j
, idx
, index
;
1739 mem_descr_wrbh
= phba
->init_mem
;
1740 mem_descr_wrbh
+= HWI_MEM_WRBH
;
1742 mem_descr_wrb
= phba
->init_mem
;
1743 mem_descr_wrb
+= HWI_MEM_WRB
;
1746 pwrb_handle
= mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
1747 num_cxn_wrbh
= ((mem_descr_wrbh
->mem_array
[idx
].size
) /
1748 ((sizeof(struct wrb_handle
)) *
1749 phba
->params
.wrbs_per_cxn
));
1750 phwi_ctrlr
= phba
->phwi_ctrlr
;
1752 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
* 2; index
+= 2) {
1753 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
1754 SE_DEBUG(DBG_LVL_8
, "cid=%d pwrb_context=%p \n", index
,
1756 pwrb_context
->pwrb_handle_base
=
1757 kzalloc(sizeof(struct wrb_handle
*) *
1758 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
1759 pwrb_context
->pwrb_handle_basestd
=
1760 kzalloc(sizeof(struct wrb_handle
*) *
1761 phba
->params
.wrbs_per_cxn
, GFP_KERNEL
);
1763 pwrb_context
->alloc_index
= 0;
1764 pwrb_context
->wrb_handles_available
= 0;
1765 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
1766 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
1767 pwrb_context
->pwrb_handle_basestd
[j
] =
1769 pwrb_context
->wrb_handles_available
++;
1772 pwrb_context
->free_index
= 0;
1777 mem_descr_wrbh
->mem_array
[idx
].virtual_address
;
1779 ((mem_descr_wrbh
->mem_array
[idx
].size
) /
1780 ((sizeof(struct wrb_handle
)) *
1781 phba
->params
.wrbs_per_cxn
));
1782 pwrb_context
->alloc_index
= 0;
1783 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
1784 pwrb_context
->pwrb_handle_base
[j
] = pwrb_handle
;
1785 pwrb_context
->pwrb_handle_basestd
[j
] =
1787 pwrb_context
->wrb_handles_available
++;
1790 pwrb_context
->free_index
= 0;
1795 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
1797 ((mem_descr_wrb
->mem_array
[idx
].size
) / (sizeof(struct iscsi_wrb
)) *
1798 phba
->params
.wrbs_per_cxn
);
1800 for (index
= 0; index
< phba
->params
.cxns_per_ctrl
; index
+= 2) {
1801 pwrb_context
= &phwi_ctrlr
->wrb_context
[index
];
1803 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
1804 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
1805 pwrb_handle
->pwrb
= pwrb
;
1811 pwrb
= mem_descr_wrb
->mem_array
[idx
].virtual_address
;
1812 num_cxn_wrb
= ((mem_descr_wrb
->mem_array
[idx
].size
) /
1813 (sizeof(struct iscsi_wrb
)) *
1814 phba
->params
.wrbs_per_cxn
);
1815 for (j
= 0; j
< phba
->params
.wrbs_per_cxn
; j
++) {
1816 pwrb_handle
= pwrb_context
->pwrb_handle_base
[j
];
1817 pwrb_handle
->pwrb
= pwrb
;
1825 static void hwi_init_async_pdu_ctx(struct beiscsi_hba
*phba
)
1827 struct hwi_controller
*phwi_ctrlr
;
1828 struct hba_parameters
*p
= &phba
->params
;
1829 struct hwi_async_pdu_context
*pasync_ctx
;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
			pasync_ctx->async_data.pa_base.u.a64.address +
			(p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
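/*
 * Helpers for describing physically contiguous driver memory to the adapter:
 * be_sgl_create_contiguous() fills in a be_dma_mem element, and the
 * hwi_build_be_sgl_* wrappers build one from a mem_array entry.
 */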
static void
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(!length);

	sgl->va = virtual_address;
	sgl->dma = physical_address;
	sgl->size = length;
}
static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}
static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
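/*
 * be_fill_queue() initialises a generic be_queue_info ring: it records the
 * entry count and entry size, points the backing be_dma_mem at preallocated
 * memory from init_mem and zeroes the ring.
 */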
static int be_fill_queue(struct be_queue_info *q,
			 u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}
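/* Create the event queue on which the adapter signals completions. */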
static int beiscsi_create_eq(struct beiscsi_hba *phba,
			     struct hwi_context_memory *phwi_context)
{
	unsigned int idx = 0;
	int ret;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *eq_vaddress;

	eq = &phwi_context->be_eq.q;
	mem = &eq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_EQ;
	eq_vaddress = mem_descr->mem_array[idx].virtual_address;

	ret = be_fill_queue(eq, phba->params.num_eq_entries,
			    sizeof(struct be_eq_entry), eq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for EQ\n");
		return ret;
	}

	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;

	ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
				    phwi_context->be_eq.cur_eqd);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_cmd_eq_create Failed for EQ\n");
		return ret;
	}
	SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
	return 0;
}
static int beiscsi_create_cq(struct beiscsi_hba *phba,
			     struct hwi_context_memory *phwi_context)
{
	unsigned int idx = 0;
	int ret;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *cq_vaddress;

	cq = &phwi_context->be_cq;
	eq = &phwi_context->be_eq.q;
	mem = &cq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_CQ;
	cq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
			    sizeof(struct sol_cqe), cq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for ISCSI CQ\n");
		return ret;
	}

	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
	ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_cmd_cq_create Failed for ISCSI CQ\n");
		return ret;
	}
	SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
	SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
	return 0;
}
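/*
 * The two functions below create the default PDU header and data rings, on
 * which the adapter posts buffers for unsolicited iSCSI PDUs.
 */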
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz)
{
	unsigned int idx = 0;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	dq = &phwi_context->be_def_hdrq;
	cq = &phwi_context->be_cq;
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU HDR\n");
		return ret;
	}
	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
		 phwi_context->be_def_hdrq.id);
	hwi_post_async_buffers(phba, 1);
	return 0;
}
static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz)
{
	unsigned int idx = 0;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	dataq = &phwi_context->be_def_dataq;
	cq = &phwi_context->be_cq;
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU DATA\n");
		return ret;
	}
	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed"
			     " for DEF PDU DATA\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
		 phwi_context->be_def_dataq.id);
	hwi_post_async_buffers(phba, 0);
	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
	return 0;
}
static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	unsigned int page_offset, i;
	struct be_dma_mem sgl;
	int status;

	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_SGE;
	pm_arr = mem_descr->mem_array;

	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
	for (i = 0; i < mem_descr->num_elements; i++) {
		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
						     page_offset,
						     (pm_arr->size / PAGE_SIZE));
		page_offset += pm_arr->size / PAGE_SIZE;
		if (status != 0) {
			shost_printk(KERN_ERR, phba->shost,
				     "post sgl failed.\n");
			return status;
		}
		pm_arr++;
	}
	SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
	return 0;
}
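/*
 * Carve the HWI_MEM_WRB region into one WRB (work request block) ring per
 * connection and register each ring with the adapter.
 */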
static int
beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
			 struct hwi_context_memory *phwi_context,
			 struct hwi_controller *phwi_ctrlr)
{
	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
	u64 pa_addr_lo;
	unsigned int idx, num, i;
	struct mem_array *pwrb_arr;
	void *wrb_vaddr;
	struct be_dma_mem sgl;
	struct be_mem_descriptor *mem_descr;
	int status;

	idx = 0;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_WRB;
	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
			   GFP_KERNEL);
	if (!pwrb_arr) {
		shost_printk(KERN_ERR, phba->shost,
			     "Memory alloc failed in create wrb ring.\n");
		return -ENOMEM;
	}
	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
	num_wrb_rings = mem_descr->mem_array[idx].size /
		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));

	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
		if (num_wrb_rings) {
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
						sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		} else {
			idx++;
			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].
						bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					 sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
						sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		wrb_mem_index = 0;
		offset = 0;
		size = 0;

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i]);
		if (status != 0) {
			shost_printk(KERN_ERR, phba->shost,
				     "wrbq create failed.");
			kfree(pwrb_arr);
			return status;
		}
		phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
	}
	kfree(pwrb_arr);
	return 0;
}
static void free_wrb_handles(struct beiscsi_hba *phba)
{
	unsigned int index;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;

	phwi_ctrlr = phba->phwi_ctrlr;
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
}
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}

	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	q = &phwi_context->be_cq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);

	q = &phwi_context->be_eq.q;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
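/*
 * hwi_init_port() brings up the hardware interface: EQ, CQ, default PDU
 * rings, SGL page posting and per-connection WRB rings, in that order.
 * Any failure tears down what was already created via hwi_cleanup().
 */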
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;

	phwi_context = phwi_ctrlr->phwi_ctxt;
	phwi_context->be_eq.max_eqd = 0;
	phwi_context->be_eq.min_eqd = 0;
	phwi_context->be_eq.cur_eqd = 64;
	phwi_context->be_eq.enable_aic = false;
	be_cmd_fw_initialize(&phba->ctrl);
	status = beiscsi_create_eq(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
		goto error;
	}

	status = mgmt_check_supported_fw(ctrl);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version\n");
		goto error;
	}

	status = mgmt_get_fw_config(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto error;
	}

	status = beiscsi_create_cq(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
static int hwi_init_controller(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
			init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].
			virtual_address;
		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
			 phwi_ctrlr->phwi_ctxt);
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_ADDN_CONTEXT is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	iscsi_init_global_templates(phba);
	beiscsi_init_wrb_handle(phba);
	hwi_init_async_pdu_ctx(phba);
	if (hwi_init_port(phba) != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "hwi_init_controller failed\n");
		return -ENOMEM;
	}
	return 0;
}
static void beiscsi_free_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	int i, j;

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
				mem_descr->mem_array[j - 1].size,
				mem_descr->mem_array[j - 1].virtual_address,
				mem_descr->mem_array[j - 1].bus_address.
					u.a64.address);
		}
		kfree(mem_descr->mem_array);
		mem_descr++;
	}
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
}
static int beiscsi_init_controller(struct beiscsi_hba *phba)
{
	int ret;

	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
			     "Failed in beiscsi_alloc_memory\n");
		return ret;
	}

	ret = hwi_init_controller(phba);
	if (ret)
		goto free_init;
	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
	return 0;

free_init:
	beiscsi_free_mem(phba);
	return -ENOMEM;
}
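/*
 * Build the pools of SGL handles: one pool for normal I/O commands and a
 * separate pool for management/error-handling tasks, each handle pointing
 * at its iscsi_sge fragment array in the HWI_MEM_SGE region.
 */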
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;
	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);

	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
			 (sizeof(struct iscsi_sge) *
			  phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_cid_start + arr_index++;
		}
		idx++;
	}
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
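/* Allocate the CID and endpoint lookup tables used when opening connections. */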
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
	int i, new_cid;

	phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
				  GFP_KERNEL);
	if (!phba->cid_array) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed to allocate memory in "
			     "hba_setup_cid_tbls\n");
		return -ENOMEM;
	}
	phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
	if (!phba->ep_array) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed to allocate memory in "
			     "hba_setup_cid_tbls\n");
		kfree(phba->cid_array);
		return -ENOMEM;
	}
	new_cid = phba->fw_config.iscsi_icd_start;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		phba->cid_array[i] = new_cid;
		new_cid += 2;
	}
	phba->avlbl_cids = phba->params.cxns_per_ctrl;
	return 0;
}
static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	eq = &phwi_context->be_eq.q;
	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);
	SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);

	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
		iowrite32(reg, addr);
		SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);

		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_enable_intr, Not Enabled\n");
	return true;
}
static void hwi_disable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);

	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_disable_intr, Already Disabled\n");
}
static int beiscsi_init_port(struct beiscsi_hba *phba)
{
	int ret;

	ret = beiscsi_init_controller(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
			     "beiscsi_init_controller\n");
		return ret;
	}
	ret = beiscsi_init_sgl_handle(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
			     "beiscsi_init_sgl_handle\n");
		goto do_cleanup_ctrlr;
	}

	if (hba_setup_cid_tbls(phba)) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed in hba_setup_cid_tbls\n");
		kfree(phba->io_sgl_hndl_base);
		kfree(phba->eh_sgl_hndl_base);
		ret = -ENOMEM;
		goto do_cleanup_ctrlr;
	}

	return 0;

do_cleanup_ctrlr:
	hwi_cleanup(phba);
	return ret;
}
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	eq = &phwi_context->be_eq.q;
	eqe = queue_tail_node(eq);

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
			& EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
}
static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
	unsigned char mgmt_status;

	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
	if (mgmt_status)
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_epfw_cleanup FAILED\n");
	hwi_purge_eq(phba);
	hwi_cleanup(phba);
	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->cid_array);
	kfree(phba->ep_array);
}
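/*
 * Push the negotiated login parameters into a target-context-update WRB so
 * the adapter can take over (offload) the connection, then ring the doorbell.
 */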
static void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      TGT_CTX_UPDT_CMD);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
		    DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = be32_to_cpu(itt) >> 16;
	if (age)
		*age = conn->session->age;
}
/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	itt_t itt;
	dma_addr_t paddr;

	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_KERNEL, &paddr);

	if (!io_task->cmd_bhs)
		return -ENOMEM;

	io_task->bhs_pa.u.a64.address = paddr;
	io_task->pwrb_handle = alloc_wrb_handle(phba,
						beiscsi_conn->beiscsi_conn_cid,
						task->itt);
	io_task->pwrb_handle->pio_handle = task;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);

	if (task->sc) {
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
	} else {
		io_task->scsi_cmnd = NULL;
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			if (!beiscsi_conn->login_in_progress) {
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
			}
		} else {
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
		}
	}
	itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) |
			(unsigned int)(io_task->psgl_handle->sgl_index));
	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_hndls:
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
	return -ENOMEM;
}
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
	}

	if (task->sc) {
		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
			return;
		if (io_task->psgl_handle) {
			spin_lock(&phba->mgmt_sgl_lock);
			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	}
}
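/*
 * Build and post the WRB for a SCSI command: set up the Data-Out template
 * for writes, fill in the LUN, transfer length, ITT/CmdSN and SGL, then
 * ring the TX doorbell for this connection.
 */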
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		SE_DEBUG(DBG_LVL_4, "READ Command \t");
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
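/*
 * beiscsi_mtask() posts management PDUs (login, NOP-Out, text, TMF, logout)
 * on the connection's WRB ring; only the WRB type differs per opcode.
 */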
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	struct iscsi_task *aborted_task;

	pwrb = io_task->pwrb_handle->pwrb;
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		aborted_task = iscsi_itt_to_task(conn,
					((struct iscsi_tm *)task->hdr)->rtt);
		if (!aborted_task)
			return 0;
		aborted_io_task = aborted_task->dd_data;
		if (!aborted_io_task->scsi_cmnd)
			return 0;

		mgmt_invalidate_icds(phba,
				     aborted_io_task->psgl_handle->sgl_index,
				     beiscsi_conn->beiscsi_conn_cid);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      be32_to_cpu(task->data_count));
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
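/*
 * libiscsi transmit entry point: management tasks are routed to
 * beiscsi_mtask(), SCSI commands are DMA-mapped and handed to
 * beiscsi_iotask().
 */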
static int beiscsi_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct scatterlist *sg;
	int num_sg;
	unsigned int writedir = 0, xferlen = 0;

	SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
		 "beiscsi_conn=%p\n", beiscsi_conn->beiscsi_conn_cid,
		 task, conn, beiscsi_conn);
	if (!sc)
		return beiscsi_mtask(task);

	io_task->scsi_cmnd = sc;
	num_sg = scsi_dma_map(sc);
	if (num_sg < 0) {
		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n");
		return num_sg;
	}
	SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
		 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
	xferlen = scsi_bufflen(sc);
	sg = scsi_sglist(sc);
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		writedir = 1;
		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
			 task->imm_count);
	} else
		writedir = 0;
	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	hwi_disable_intr(phba);
	if (phba->pcidev->irq)
		free_irq(phba->pcidev->irq, phba);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		blk_iopoll_disable(&phba->iopoll);

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}
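/* PCI probe: map the device, initialise the port and enable interrupts. */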
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
				       const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	int ret;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			"Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed in beiscsi_hba_alloc\n");
		goto disable_pci;
	}

	pci_set_drvdata(pcidev, phba);
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in be_ctrl_init\n");
		goto hba_free;
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	beiscsi_get_params(phba);
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
		 phba->shost->host_no);
	phba->wq = create_singlethread_workqueue(phba->wq_name);
	if (!phba->wq) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to allocate work queue\n");
		goto free_twq;
	}

	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

	if (blk_iopoll_enabled) {
		blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll);
		blk_iopoll_enable(&phba->iopoll);
	}

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	ret = hwi_enable_intr(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to hwi_enable_intr\n");
		goto free_irqs;
	}

	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
	return 0;

free_irqs:
	if (phba->pcidev->irq)
		free_irq(phba->pcidev->irq, phba);
free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		blk_iopoll_disable(&phba->iopoll);
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_CONN_ADDRESS |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
			   ISCSI_HOST_INITIATOR_NAME,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register beiscsi"
			 "transport.\n");
		return -ENOMEM;
	}
	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
		 &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register"
			 "beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);