/*
 * Copyright (C) 2005 - 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 * Costa Mesa, CA 92626
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
#define beiscsi_disp_param(_name)\
static ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			param_val);	\
}
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}
#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return 0;\
}
#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)
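/*
 * Illustrative expansion (not part of the original source): a hypothetical
 *
 *	BEISCSI_RW_ATTR(foo, 0x00, 0x10, 0x04, "example parameter")
 *
 * would create the module parameter "beiscsi_foo", the sysfs accessors
 * beiscsi_foo_disp()/beiscsi_foo_store() backed by phba->attr_foo, the
 * range-checked updater beiscsi_foo_change(), and beiscsi_foo_init(),
 * which falls back to the default 0x04 when the supplied value is
 * outside [0x00 - 0x10].
 */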
/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events	: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling	: 0x08\n"
		"\t\t\t\tIO Path Events	: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n"
		"\t\t\t\tiSCSI Protocol	: 0x40\n");
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};
static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->frwd_lock);
	/* Invalidate WRB Posted for this Task */
	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
		      aborted_io_task->pwrb_handle->pwrb,
		      1);

	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		/* Invalidate WRB Posted for this Task */
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb,
			      1);

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->frwd_lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}
/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}
/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.
					iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.
					iscsi_icd_start[ulp_num],
				    phba->fw_config.
					iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}
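/*
 * Worked example of the alignment above (illustrative numbers only): if
 * PAGE_SIZE / (BE2_SGE * sizeof(struct iscsi_sge)) gave
 * icd_post_per_page = 8, then align_mask = 7; an icd_start of 10 would be
 * moved up to (10 + 8) & ~7 = 16, icd_count would be rounded down to a
 * multiple of 8, and the sum of both losses is reported as
 * "ICD Discarded".
 */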
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		 << DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
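/*
 * The EQ doorbell is a single 32-bit write that packs the ring id (split
 * into low/high fields), the number of popped entries and the
 * rearm/clear/event control bits. For example (illustrative), rearming
 * EQ id 5 after consuming 3 entries ORs together (1 << DB_EQ_REARM_SHIFT),
 * (3 << DB_EQ_NUM_POPPED_SHIFT) and (5 & DB_EQ_RING_ID_LOW_MASK) before
 * the iowrite32().
 */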
/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}
/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}
/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}
	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;

free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}
void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		 << DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	spin_lock_bh(&phba->io_sgl_lock);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_bh(&phba->io_sgl_lock);
	return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	spin_lock_bh(&phba->io_sgl_lock);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		spin_unlock_bh(&phba->io_sgl_lock);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_bh(&phba->io_sgl_lock);
}
static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;

	spin_lock_bh(&pwrb_context->wrb_lock);
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_bh(&pwrb_context->wrb_lock);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}
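/*
 * Usage sketch (illustrative, not from the original source): the I/O path
 * allocates one WRB handle per outstanding task, e.g.
 *
 *	struct hwi_wrb_context *pwrb_context;
 *	struct wrb_handle *pwrb_handle;
 *
 *	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
 *				       &pwrb_context);
 *
 * and returns it with free_wrb_handle() on completion. Allocation and free
 * move independent circular indexes (alloc_index/free_index) under
 * pwrb_context->wrb_lock.
 */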
static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	spin_lock_bh(&pwrb_context->wrb_lock);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	spin_unlock_bh(&pwrb_context->wrb_lock);
}
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	spin_lock_bh(&phba->mgmt_sgl_lock);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_bh(&phba->mgmt_sgl_lock);
	return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	spin_lock_bh(&phba->mgmt_sgl_lock);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle,"
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL ,"
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_bh(&phba->mgmt_sgl_lock);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_bh(&phba->mgmt_sgl_lock);
}
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
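/*
 * Note on the sense handling above: the firmware places the sense length
 * as a big-endian u16 at the start of sts_bhs->sense_info, immediately
 * followed by the sense bytes themselves, hence the copy from
 * sense_info + sizeof(unsigned short) clamped to SCSI_SENSE_BUFFERSIZE.
 */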
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	task = pwrb_handle->pio_handle;
	iscsi_put_task(task);
}
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}
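/*
 * The solicited CQE layout differs between BE2/BE3-R (struct amap_sol_cqe)
 * and the later SKH-R style adapters (struct amap_sol_cqe_v2);
 * adapter_get_sol_cqe() flattens either format into struct common_sol_cqe
 * so the completion helpers above and hwi_complete_cmd() below stay
 * chip-agnostic.
 */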
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	uint16_t cri_index = 0;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	spin_lock_bh(&session->back_lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}
/*
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(!(dlen == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}
static void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	if (pasync_handle->is_header) {
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_header.free_list);
		pasync_ctx->async_header.free_entries++;
	} else {
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_data.free_list);
		pasync_ctx->async_data.free_entries++;
	}
}
static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u8 final, error = 0;
	u16 cid, code, ci;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	/**
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/**
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		pasync_handle = NULL;
		break;
	}

	if (!pasync_handle) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		return pasync_handle;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/**
	 * Each CID is associated with unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 **/
	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	/* empty the slot */
	if (pasync_handle->is_header)
		pasync_ctx->async_entry[ci].header = NULL;
	else
		pasync_ctx->async_entry[ci].data = NULL;

	/**
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		pasync_handle = NULL;
	}
	return pasync_handle;
}
static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}
static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}
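/*
 * Reassembly summary: the first handle on the wait queue carries the PDU
 * header, the second supplies the buffer into which all remaining data
 * handles are memcpy'd back-to-back (this assumes the first data buffer is
 * large enough for the whole data segment). The assembled PDU is then
 * handed to beiscsi_complete_pdu() under session->back_lock and every
 * handle is recycled via beiscsi_hdl_purge_handles().
 */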
static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num)
{
	struct hd_async_handle *pasync_handle, *tmp, **slot;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct list_head *hfree_list;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u16 index, num_entries;
	u32 doorbell_offset;
	u16 prod = 0, cons;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	num_entries = pasync_ctx->num_entries;
	if (header) {
		cons = pasync_ctx->async_header.free_entries;
		hfree_list = &pasync_ctx->async_header.free_list;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
				  doorbell_offset;
	} else {
		cons = pasync_ctx->async_data.free_entries;
		hfree_list = &pasync_ctx->async_data.free_list;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
				  doorbell_offset;
	}
	/* number of entries posted must be in multiples of 8 */
	if (cons % 8)
		return;

	list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
		list_del_init(&pasync_handle->link);
		pasync_handle->is_final = 0;
		pasync_handle->buffer_len = 0;

		/* handles can be consumed out of order, use index in handle */
		index = pasync_handle->index;
		WARN_ON(pasync_handle->is_header != header);
		if (header)
			slot = &pasync_ctx->async_entry[index].header;
		else
			slot = &pasync_ctx->async_entry[index].data;
		/**
		 * The slot just tracks handle's hold and release, so
		 * overwriting at the same index won't do any harm but
		 * needs to be caught.
		 */
		if (*slot != NULL) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
				    "BM_%d : async PDU %s slot at %u not empty\n",
				    header ? "header" : "data", index);
		}
		/**
		 * We use same freed index as in completion to post so this
		 * operation is not required for refills. It's required only
		 * for ring creation.
		 */
		if (header)
			pasync_sge = pasync_ctx->async_header.ring_base;
		else
			pasync_sge = pasync_ctx->async_data.ring_base;
		pasync_sge += index;
		/* if its a refill then address is same; hi is lo */
		WARN_ON(pasync_sge->hi &&
			pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
		WARN_ON(pasync_sge->lo &&
			pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
		pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
		pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

		*slot = pasync_handle;
		if (++prod == cons)
			break;
	}
	if (header)
		pasync_ctx->async_header.free_entries -= prod;
	else
		pasync_ctx->async_data.free_entries -= prod;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}
static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u16 cid_cri;
	u8 ulp_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe);
	if (!pasync_handle)
		return;

	beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
}
void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
static void beiscsi_mcc_work(struct work_struct *work)
{
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;

	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
	phba = pbe_eq->phba;
	beiscsi_process_mcc_cq(phba);
	/* rearm EQ for further interrupts */
	if (!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
1858 * beiscsi_process_cq()- Process the Completion Queue
1859 * @pbe_eq: Event Q on which the Completion has come
1860 * @budget: Max number of events to processed
1863 * Number of Completion Entries processed.
1865 unsigned int beiscsi_process_cq(struct be_eq_obj
*pbe_eq
, int budget
)
1867 struct be_queue_info
*cq
;
1868 struct sol_cqe
*sol
;
1869 struct dmsg_cqe
*dmsg
;
1870 unsigned int total
= 0;
1871 unsigned int num_processed
= 0;
1872 unsigned short code
= 0, cid
= 0;
1873 uint16_t cri_index
= 0;
1874 struct beiscsi_conn
*beiscsi_conn
;
1875 struct beiscsi_endpoint
*beiscsi_ep
;
1876 struct iscsi_endpoint
*ep
;
1877 struct beiscsi_hba
*phba
;
1880 sol
= queue_tail_node(cq
);
1881 phba
= pbe_eq
->phba
;
1883 while (sol
->dw
[offsetof(struct amap_sol_cqe
, valid
) / 32] &
1885 if (beiscsi_hba_in_error(phba
))
1888 be_dws_le_to_cpu(sol
, sizeof(struct sol_cqe
));
1890 code
= (sol
->dw
[offsetof(struct amap_sol_cqe
, code
) /
1891 32] & CQE_CODE_MASK
);
1894 if (is_chip_be2_be3r(phba
)) {
1895 cid
= AMAP_GET_BITS(struct amap_sol_cqe
, cid
, sol
);
1897 if ((code
== DRIVERMSG_NOTIFY
) ||
1898 (code
== UNSOL_HDR_NOTIFY
) ||
1899 (code
== UNSOL_DATA_NOTIFY
))
1900 cid
= AMAP_GET_BITS(
1901 struct amap_i_t_dpdu_cqe_v2
,
1904 cid
= AMAP_GET_BITS(struct amap_sol_cqe_v2
,
1908 cri_index
= BE_GET_CRI_FROM_CID(cid
);
1909 ep
= phba
->ep_array
[cri_index
];
1912 /* connection has already been freed
1913 * just move on to next one
1915 beiscsi_log(phba
, KERN_WARNING, BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CXN_KILLED_HDR_DIGEST_ERR:
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
				    cqe_desc[code], code, cid);
			spin_lock_bh(&phba->async_pdu_lock);
			/* driver consumes the entry and drops the contents */
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Event %s[%d] received on CID : %d\n",
				    cqe_desc[code], code, cid);
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x\n",
				    code, cid);
			break;
		}

proc_next_cqe:
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
		if (total == budget)
			break;
	}

	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
	return total;
}
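
/*
 * Completion-path doorbell protocol, as implemented above: CQEs are
 * consumed in place and handed back to hardware in batches of 32 via
 * hwi_ring_cq_db(..., 32, 0) with rearm off, so the firmware never runs
 * out of free CQEs mid-burst.  Only the final doorbell, rung once the
 * budget is exhausted or the ring is empty, passes rearm = 1 so that
 * the EQ fires again when new completions arrive.
 */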
static int be_iopoll(struct irq_poll *iop, int budget)
{
	unsigned int ret, io_events;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	phba = pbe_eq->phba;
	if (beiscsi_hba_in_error(phba)) {
		irq_poll_complete(iop);
		return 0;
	}

	io_events = 0;
	eq = &pbe_eq->q;
	eqe = queue_tail_node(eq);
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
	       EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		io_events++;
	}
	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);

	ret = beiscsi_process_cq(pbe_eq, budget);
	pbe_eq->cq_count += ret;
	if (ret < budget) {
		irq_poll_complete(iop);
		beiscsi_log(phba, KERN_INFO,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
			    pbe_eq->q.id, ret);
		if (!beiscsi_hba_in_error(phba))
			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}
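
/*
 * be_iopoll() follows the irq_poll contract: when fewer CQEs than
 * @budget were processed, the handler must call irq_poll_complete() and
 * re-enable its own interrupt source (the EQ doorbell rearm above);
 * returning the full budget keeps the poller scheduled, and be_iopoll()
 * is invoked again without the interrupt being touched.
 */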
static void
hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
		 unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
						sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_len, pwrb, sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_len, pwrb, sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      1);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
			      0);
	}

	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
						sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
			      0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
			      0);
	}

	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
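
/*
 * Both SGL writers above use the same two-level layout: the first two
 * data fragments are programmed directly into the WRB (sge0/sge1, with
 * sge1_r2t_offset carrying the length of sge0), while the complete
 * fragment list is also written into the ICD SGL at psgl_handle->pfrag,
 * whose first SGE always points at the iSCSI BHS.  For example, a
 * three-fragment I/O leaves sge0_last = sge1_last = 0 in the WRB and
 * sets last_sge only on the final SGE of the ICD SGL.
 */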
/**
 * hwi_write_buffer()- Populate the WRB with task info
 * @pwrb: ptr to the WRB entry
 * @task: iscsi task which is to be executed
 **/
static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	uint8_t dsp_value = 0;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		/* Check for the data_count */
		dsp_value = (task->data_count) ? 1 : 0;

		if (is_chip_be2_be3r(phba))
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
				      pwrb, dsp_value);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
				      pwrb, dsp_value);

		/* Map addr only if there is data_count */
		if (dsp_value) {
			io_task->mtask_addr = pci_map_single(phba->pcidev,
							     task->data,
							     task->data_count,
							     PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(phba->pcidev,
						  io_task->mtask_addr))
				return -ENOMEM;
			io_task->mtask_data_count = task->data_count;
		} else
			io_task->mtask_addr = 0;

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      lower_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      upper_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		io_task->mtask_addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(io_task->mtask_addr));

		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
	return 0;
}
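
/*
 * For mgmt tasks the payload, when present, is a single flat buffer:
 * dsp is set only when task->data_count is non-zero, and the mapping
 * made here with pci_map_single() is recorded in io_task->mtask_addr /
 * mtask_data_count so that beiscsi_free_mgmt_task_handles() can undo it
 * with pci_unmap_single() once the task completes.
 */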
/**
 * beiscsi_find_mem_req()- Find mem needed
 * @phba: ptr to HBA struct
 **/
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
	uint8_t mem_descr_index, ulp_num;
	unsigned int num_cq_pages, num_async_pdu_buf_pages;
	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
				      sizeof(struct sol_cqe));

	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);

	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
						 BE_ISCSI_PDU_HEADER_SIZE;
	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
					sizeof(struct hwi_context_memory);

	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
	    * (phba->params.wrbs_per_cxn)
	    * phba->params.cxns_per_ctrl;
	wrb_sz_per_cxn = sizeof(struct wrb_handle) *
			 (phba->params.wrbs_per_cxn);
	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
				phba->params.cxns_per_ctrl);

	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
		phba->params.icds_per_ctrl;
	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			num_async_pdu_buf_sgl_pages =
				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			num_async_pdu_buf_pages =
				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
					       phba, ulp_num) *
					       phba->params.defpdu_hdr_sz);

			num_async_pdu_data_pages =
				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
					       phba, ulp_num) *
					       phba->params.defpdu_data_sz);

			num_async_pdu_data_sgl_pages =
				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_buf_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_data_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_buf_sgl_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
					  num_async_pdu_data_sgl_pages *
					  PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
					  (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				sizeof(struct hd_async_context) +
				(BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				 sizeof(struct hd_async_entry));
		}
	}
}
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	dma_addr_t bus_add;
	struct hwi_controller *phwi_ctrlr;
	struct be_mem_descriptor *mem_descr;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	/* Allocate memory for wrb_context */
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
					  phba->params.cxns_per_ctrl,
					  GFP_KERNEL);
	if (!phwi_ctrlr->wrb_context) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		if (!phba->mem_req[i]) {
			mem_descr->mem_array = NULL;
			mem_descr++;
			continue;
		}

		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							    (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    (unsigned long)mem_descr->
					    mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr->wrb_context);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
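
/*
 * Allocation strategy used above: each mem_req[i] region is carved into
 * at most BEISCSI_MAX_FRAGS_INIT DMA-coherent fragments, each capped at
 * be_max_phys_size KB.  On allocation failure the fragment size first
 * drops to the nearest lower power of two, then halves, until it falls
 * to BE_MIN_MEM_SIZE.  E.g. with the 64 KB default cap, a 200 KB
 * request typically lands as four fragments of 64 + 64 + 64 + 8 KB.
 */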
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);
	return beiscsi_alloc_mem(phba);
}
static void iscsi_init_global_templates(struct beiscsi_hba *phba)
{
	struct pdu_data_out *pdata_out;
	struct pdu_nop_out *pnop_out;
	struct be_mem_descriptor *mem_descr;

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	pdata_out =
	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);

	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
		      IIOC_SCSI_DATA);

	pnop_out =
	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);

	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}
static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct hwi_context_memory *phwi_ctxt;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int num_cxn_wrbh = 0;
	unsigned int num_cxn_wrb = 0, j, idx = 0, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;
	phwi_ctrlr = phba->phwi_ctrlr;

	/* Allocate memory for WRBQ */
	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
				     phba->params.cxns_per_ctrl,
				     GFP_KERNEL);
	if (!phwi_ctxt->be_wrbq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : WRBQ Mem Alloc Failed\n");
		return -ENOMEM;
	}

	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		pwrb_context->pwrb_handle_base =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (!pwrb_context->pwrb_handle_base) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			goto init_wrb_hndl_failed;
		}
		pwrb_context->pwrb_handle_basestd =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (!pwrb_context->pwrb_handle_basestd) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			goto init_wrb_hndl_failed;
		}
		if (!num_cxn_wrbh) {
			pwrb_handle =
				mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
					((sizeof(struct wrb_handle)) *
					 phba->params.wrbs_per_cxn));
			idx++;
		}
		pwrb_context->alloc_index = 0;
		pwrb_context->wrb_handles_available = 0;
		pwrb_context->free_index = 0;

		if (num_cxn_wrbh) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
								pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			num_cxn_wrbh--;
		}
		spin_lock_init(&pwrb_context->wrb_lock);
	}
	idx = 0;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (!num_cxn_wrb) {
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				((sizeof(struct iscsi_wrb) *
				  phba->params.wrbs_per_cxn));
			idx++;
		}

		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
	return 0;
init_wrb_hndl_failed:
	for (j = index; j > 0; j--) {
		pwrb_context = &phwi_ctrlr->wrb_context[j];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
	kfree(phwi_ctxt->be_wrbq);
	return -ENOMEM;
}
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	uint8_t ulp_num;
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hd_async_context *pasync_ctx;
	struct hd_async_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			/* get async_ctx for each ULP */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
				      (ulp_num * MEM_DESCR_OFFSET));

			phwi_ctrlr = phba->phwi_ctrlr;
			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
				(struct hd_async_context *)
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
			memset(pasync_ctx, 0, sizeof(*pasync_ctx));

			pasync_ctx->async_entry =
				(struct hd_async_entry *)
				((long unsigned int)pasync_ctx +
				 sizeof(struct hd_async_context));

			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
						  ulp_num);
			/* setup header buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
			pasync_ctx->async_header.va_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx->async_header.pa_base.u.a64.address =
				mem_descr->mem_array[0].
				bus_address.u.a64.address;

			/* setup header buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup header buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.handle_base =
				mem_descr->mem_array[0].virtual_address;
			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

			/* setup data buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup data buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (!mem_descr->mem_array[0].virtual_address)
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.handle_base =
				mem_descr->mem_array[0].virtual_address;
			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

			pasync_header_h =
				(struct hd_async_handle *)
				pasync_ctx->async_header.handle_base;
			pasync_data_h =
				(struct hd_async_handle *)
				pasync_ctx->async_data.handle_base;

			/* setup data buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			idx = 0;
			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					  phba->params.defpdu_data_sz);
			num_per_mem = 0;

			for (index = 0; index < BEISCSI_GET_CID_COUNT
					(phba, ulp_num); index++) {
				pasync_header_h->cri = -1;
				pasync_header_h->is_header = 1;
				pasync_header_h->index = index;
				INIT_LIST_HEAD(&pasync_header_h->link);
				pasync_header_h->pbuffer =
					(void *)((unsigned long)
						 (pasync_ctx->
						  async_header.va_base) +
						 (p->defpdu_hdr_sz * index));

				pasync_header_h->pa.u.a64.address =
					pasync_ctx->async_header.pa_base.u.a64.
					address + (p->defpdu_hdr_sz * index);

				list_add_tail(&pasync_header_h->link,
					      &pasync_ctx->async_header.
					      free_list);
				pasync_header_h++;
				pasync_ctx->async_header.free_entries++;
				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       wq.list);
				pasync_ctx->async_entry[index].header = NULL;

				pasync_data_h->cri = -1;
				pasync_data_h->is_header = 0;
				pasync_data_h->index = index;
				INIT_LIST_HEAD(&pasync_data_h->link);

				if (!num_async_data) {
					num_per_mem = 0;
					idx++;
					pasync_ctx->async_data.va_base =
						mem_descr->mem_array[idx].
						virtual_address;
					pasync_ctx->async_data.pa_base.u.
						a64.address =
						mem_descr->mem_array[idx].
						bus_address.u.a64.address;
					num_async_data =
						((mem_descr->mem_array[idx].
						  size) /
						 phba->params.defpdu_data_sz);
				}
				pasync_data_h->pbuffer =
					(void *)((unsigned long)
					(pasync_ctx->async_data.va_base) +
					(p->defpdu_data_sz * num_per_mem));

				pasync_data_h->pa.u.a64.address =
					pasync_ctx->async_data.pa_base.u.a64.
					address + (p->defpdu_data_sz *
					num_per_mem);
				num_per_mem++;
				num_async_data--;

				list_add_tail(&pasync_data_h->link,
					      &pasync_ctx->async_data.
					      free_list);
				pasync_data_h++;
				pasync_ctx->async_data.free_entries++;
				pasync_ctx->async_entry[index].data = NULL;
			}
		}
	}

	return 0;
}
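
/*
 * The async PDU context built above keeps one hd_async_handle per CID
 * for headers and one for data.  Header buffers come from a single
 * contiguous chunk (defpdu_hdr_sz apart), while data buffers may span
 * several mem_array elements; num_per_mem/num_async_data track where
 * the current element runs out so va_base/pa_base are rebased before
 * the next handle's pbuffer and pa are computed.
 */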
static void
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(!length);
	WARN_ON(!sgl);

	sgl->va = virtual_address;
	sgl->dma = (unsigned long)physical_address;
	sgl->size = length;
}

static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}

static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
static int be_fill_queue(struct be_queue_info *q,
		u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}
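
/*
 * be_fill_queue() only wraps memory the caller already owns; compare
 * be_queue_alloc() further down, which obtains its own DMA-coherent
 * buffer.  A typical call pairs it with pci_alloc_consistent(), as in
 * beiscsi_create_eqs() below (illustrative only):
 *
 *	eq_vaddress = pci_alloc_consistent(phba->pcidev,
 *					   num_eq_pages * PAGE_SIZE, &paddr);
 *	ret = be_fill_queue(eq, phba->params.num_eq_entries,
 *			    sizeof(struct be_eq_entry), eq_vaddress);
 */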
static int beiscsi_create_eqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	int ret = -ENOMEM, eq_for_mcc;
	unsigned int i, num_eq_pages;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	void *eq_vaddress;
	dma_addr_t paddr;

	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
				      sizeof(struct be_eq_entry));

	if (phba->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		phwi_context->be_eq[i].phba = phba;
		eq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_eq_pages * PAGE_SIZE,
						   &paddr);
		if (!eq_vaddress)
			goto create_eq_error;

		mem->va = eq_vaddress;
		ret = be_fill_queue(eq, phba->params.num_eq_entries,
				    sizeof(struct be_eq_entry), eq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for EQ\n");
			goto create_eq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
					    phwi_context->cur_eqd);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_eq_create"
				    " failed for EQ\n");
			goto create_eq_error;
		}

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eqid = %d\n",
			    phwi_context->be_eq[i].q.id);
	}
	return 0;

create_eq_error:
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		if (mem->va)
			pci_free_consistent(phba->pcidev, num_eq_pages
					    * PAGE_SIZE,
					    mem->va, mem->dma);
	}
	return ret;
}
static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	int ret = -ENOMEM;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_cq_pages * PAGE_SIZE,
						   &paddr);
		if (!cq_vaddress)
			goto create_cq_error;

		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed "
				    "for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_cq_create "
				    "Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
			    "iSCSI CQ CREATED\n", cq->id, eq->id);
	}
	return 0;

create_cq_error:
	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		mem = &cq->dma_mem;
		if (mem->va)
			pci_free_consistent(phba->pcidev, num_cq_pages
					    * PAGE_SIZE,
					    mem->va, mem->dma);
	}
	return ret;
}
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
			    ulp_num);
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz,
					      BEISCSI_DEFQ_HDR, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
			    ulp_num);
		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_hdrq[ulp_num].id);
	return 0;
}
static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dataq = &phwi_context->be_def_dataq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU "
			    "DATA on ULP : %d\n",
			    ulp_num);
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz,
					      BEISCSI_DEFQ_DATA, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_cmd_create_default_pdu_queue"
			    " Failed for DEF PDU DATA on ULP : %d\n",
			    ulp_num);
		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi def data id on ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_dataq[ulp_num].id);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : DEFAULT PDU DATA RING CREATED "
		    "on ULP : %d\n", ulp_num);
	return 0;
}
static int
beiscsi_post_template_hdr(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	struct be_dma_mem sgl;
	int status, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			pm_arr = mem_descr->mem_array;

			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
			status = be_cmd_iscsi_post_template_hdr(
				 &phba->ctrl, &sgl);

			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Post Template HDR Failed for "
					    "ULP_%d\n", ulp_num);
				return status;
			}

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Template HDR Pages Posted for "
				    "ULP_%d\n", ulp_num);
		}
	}
	return 0;
}
static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	unsigned int page_offset, i;
	struct be_dma_mem sgl;
	int status, ulp_num = 0;

	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_SGE;
	pm_arr = mem_descr->mem_array;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;

	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
	for (i = 0; i < mem_descr->num_elements; i++) {
		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
						     page_offset,
						     (pm_arr->size / PAGE_SIZE));
		page_offset += pm_arr->size / PAGE_SIZE;
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : post sgl failed.\n");
			return status;
		}
		pm_arr++;
	}
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : POSTED PAGES\n");
	return 0;
}
static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		pci_free_consistent(phba->pcidev, mem->size,
				    mem->va, mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
static int
beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
			 struct hwi_context_memory *phwi_context,
			 struct hwi_controller *phwi_ctrlr)
{
	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
	u64 pa_addr_lo;
	unsigned int idx, num, i, ulp_num;
	struct mem_array *pwrb_arr;
	void *wrb_vaddr;
	struct be_dma_mem sgl;
	struct be_mem_descriptor *mem_descr;
	struct hwi_wrb_context *pwrb_context;
	int status;
	uint8_t ulp_count = 0, ulp_base_num = 0;
	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };

	idx = 0;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_WRB;
	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
			   GFP_KERNEL);
	if (!pwrb_arr) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Memory alloc failed in create wrb ring.\n");
		return -ENOMEM;
	}
	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
	num_wrb_rings = mem_descr->mem_array[idx].size /
		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));

	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
		if (num_wrb_rings) {
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		} else {
			idx++;
			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].\
				     bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					 sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address\
						= pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}

	/* Get the ULP Count */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			ulp_count++;
			ulp_base_num = ulp_num;
			cid_count_ulp[ulp_num] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num);
		}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		if (ulp_count > 1) {
			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;

			if (!cid_count_ulp[ulp_base_num])
				ulp_base_num = (ulp_base_num + 1) %
					       BEISCSI_ULP_COUNT;

			cid_count_ulp[ulp_base_num]--;
		}

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i],
					    &phwi_ctrlr->wrb_context[i],
					    ulp_base_num);
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : wrbq create failed.");
			kfree(pwrb_arr);
			return status;
		}
		pwrb_context = &phwi_ctrlr->wrb_context[i];
		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
	}
	kfree(pwrb_arr);
	return 0;
}
static void free_wrb_handles(struct beiscsi_hba *phba)
{
	unsigned int index;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;

	phwi_ctrlr = phba->phwi_ctrlr;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
}
static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *ptag_mem;
	struct be_queue_info *q;
	int i, tag;

	q = &phba->ctrl.mcc_obj.q;
	for (i = 0; i < MAX_MCC_CMD; i++) {
		tag = i + 1;
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state))
			continue;

		if (test_bit(MCC_TAG_STATE_TIMEOUT,
			     &ctrl->ptag_state[tag].tag_state)) {
			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
			if (ptag_mem->size) {
				pci_free_consistent(ctrl->pdev,
						    ptag_mem->size,
						    ptag_mem->va,
						    ptag_mem->dma);
				ptag_mem->size = 0;
			}
			continue;
		}
		/**
		 * If MCC is still active and waiting then wake up the process.
		 * We are here only because port is going offline. The process
		 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
		 * returned for the operation and allocated memory cleaned up.
		 */
		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
			wake_up_interruptible(&ctrl->mcc_wait[tag]);
			/*
			 * Control tag info gets reinitialized in enable
			 * so wait for the process to clear running state.
			 */
			while (test_bit(MCC_TAG_STATE_RUNNING,
					&ctrl->ptag_state[tag].tag_state))
				schedule_timeout_uninterruptible(HZ);
		}
		/**
		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
		 * MCC_TAG_STATE_IGNORE nothing needs to done.
		 */
	}
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}
static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	if (phba->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
					  [phba->num_cpus].q, false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}
/**
 * find_num_cpus()- Get the CPU online count
 * @phba: ptr to priv structure
 *
 * CPU count is used for creating EQ.
 **/
static void find_num_cpus(struct beiscsi_hba *phba)
{
	int num_cpus = 0;

	num_cpus = num_online_cpus();

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
				 BEISCSI_MAX_NUM_CPUS : num_cpus;
		break;
	case BE_GEN4:
		/*
		 * If eqid_count == 1 fall back to
		 * INTX mechanism
		 */
		if (phba->fw_config.eqid_count == 1) {
			enable_msix = 0;
			phba->num_cpus = 1;
			return;
		}

		phba->num_cpus =
			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
			(phba->fw_config.eqid_count - 1) : num_cpus;
		break;
	default:
		phba->num_cpus = 1;
	}
}
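
/*
 * Worked example: on a 16-CPU host, a GEN2/GEN3 adapter is clamped to
 * BEISCSI_MAX_NUM_CPUS EQs, while a GEN4 adapter reporting
 * eqid_count = 9 gets num_cpus = 8 (one EQ id is held back); when
 * MSI-X is enabled, beiscsi_create_eqs() then creates num_cpus + 1
 * EQs, the extra one servicing the MCC queue.
 */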
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	if (beiscsi_hba_in_error(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}
static void hwi_cleanup_port(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct hd_async_context *pasync_ctx;
	int i, eq_for_mcc, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);

	/**
	 * Purge all EQ entries that may have been left out. This is to
	 * workaround a problem we've seen occasionally where driver gets an
	 * interrupt with EQ entry bit set after stopping the controller.
	 */
	hwi_purge_eq(phba);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	be_cmd_iscsi_remove_template_hdr(ctrl);

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	kfree(phwi_context->be_wrbq);
	free_wrb_handles(phba);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			q = &phwi_context->be_def_hdrq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			q = &phwi_context->be_def_dataq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
		}
	}

	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		}
	}

	be_mcc_queues_destroy(phba);
	if (phba->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
		}
	}
	/* this ensures complete FW cleanup */
	beiscsi_cmd_function_reset(phba);
	/* last communication, indicate driver is unloading */
	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
}
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status, ulp_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	phwi_context->max_eqd = 128;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 32;
	/* set port optic state to unknown */
	phba->optic_state = 0xff;

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : EQ not created\n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = beiscsi_check_supported_fw(ctrl, phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Unsupported fw version\n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : CQ not created\n");
		goto error;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			def_pdu_ring_sz =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				sizeof(struct phys_addr);

			status = beiscsi_create_def_hdr(phba, phwi_context,
							phwi_ctrlr,
							def_pdu_ring_sz,
							ulp_num);
			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Default Header not created for ULP : %d\n",
					    ulp_num);
				goto error;
			}

			status = beiscsi_create_def_data(phba, phwi_context,
							 phwi_ctrlr,
							 def_pdu_ring_sz,
							 ulp_num);
			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Default Data not created for ULP : %d\n",
					    ulp_num);
				goto error;
			}
			/**
			 * Now that the default PDU rings have been created,
			 * let EP know about it.
			 * Call beiscsi_cmd_iscsi_cleanup before posting?
			 */
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
						 ulp_num);
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
						 ulp_num);
		}
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_post_template_hdr(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Template HDR Posting for CXN Failed\n");
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : WRB Rings not created\n");
		goto error;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint16_t async_arr_idx = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			uint16_t cri = 0;
			struct hd_async_context *pasync_ctx;

			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
				     phwi_ctrlr, ulp_num);
			for (cri = 0; cri <
			     phba->params.cxns_per_ctrl; cri++) {
				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
					       (phwi_ctrlr, cri))
					pasync_ctx->cid_to_async_cri_map[
					phwi_ctrlr->wrb_context[cri].cid] =
					async_arr_idx++;
			}
			/**
			 * Now that the default PDU rings have been created,
			 * let EP know about it.
			 */
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
						 ulp_num);
			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
						 ulp_num);
		}
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port success\n");
	return 0;

error:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port failed\n");
	hwi_cleanup_port(phba);
	return status;
}
static int hwi_init_controller(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
			    phwi_ctrlr->phwi_ctxt);
	} else {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
			    "than one element. Failing to load\n");
		return -ENOMEM;
	}

	iscsi_init_global_templates(phba);
	if (beiscsi_init_wrb_handle(phba))
		return -ENOMEM;

	if (hwi_init_async_pdu_ctx(phba)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
		return -ENOMEM;
	}

	if (hwi_init_port(phba) != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_controller failed\n");
		return -ENOMEM;
	}
	return 0;
}
static void beiscsi_free_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	int i, j;

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
			  mem_descr->mem_array[j - 1].size,
			  mem_descr->mem_array[j - 1].virtual_address,
			  (unsigned long)mem_descr->mem_array[j - 1].
			  bus_address.u.a64.address);
		}

		kfree(mem_descr->mem_array);
		mem_descr++;
	}
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr->wrb_context);
	kfree(phba->phwi_ctrlr);
}
static int beiscsi_init_controller(struct beiscsi_hba *phba)
{
	int ret = -ENOMEM;

	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - "
			    "Failed in beiscsi_alloc_memory\n");
		return ret;
	}

	ret = hwi_init_controller(phba);
	if (ret)
		goto free_init;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : Return success from beiscsi_init_controller");
	return 0;

free_init:
	beiscsi_free_mem(phba);
	return ret;
}
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;
	unsigned int ulp_icd_start, ulp_num = 0;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : HWI_MEM_SGLH is more than one element. "
			    "Failing to load\n");
		return -ENOMEM;
	}

	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : phba->io_sgl_hndl_avbl=%d "
		    "phba->eh_sgl_hndl_avbl=%d\n",
		    phba->io_sgl_hndl_avbl,
		    phba->eh_sgl_hndl_avbl);

	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
		    mem_descr_sg->num_elements);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;

	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];

	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
		}
		idx++;
	}
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
	int ret;
	uint16_t i, ulp_num;
	struct ulp_cid_info *ptr_cid_info = NULL;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
					       GFP_KERNEL);

			if (!ptr_cid_info) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory "
					    "for ULP_CID_INFO for ULP : %d\n",
					    ulp_num);
				ret = -ENOMEM;
				goto free_memory;
			}

			/* Allocate memory for CID array */
			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
						  BEISCSI_GET_CID_COUNT(phba,
						  ulp_num), GFP_KERNEL);
			if (!ptr_cid_info->cid_array) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory "
					    "for CID_ARRAY for ULP : %d\n",
					    ulp_num);
				kfree(ptr_cid_info);
				ptr_cid_info = NULL;
				ret = -ENOMEM;
				goto free_memory;
			}
			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
						   phba, ulp_num);

			/* Save the cid_info_array ptr */
			phba->cid_array_info[ulp_num] = ptr_cid_info;
		}
	}
	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
				 phba->params.cxns_per_ctrl, GFP_KERNEL);
	if (!phba->ep_array) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to allocate memory in "
			    "hba_setup_cid_tbls\n");
		ret = -ENOMEM;
		goto free_memory;
	}

	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
				   phba->params.cxns_per_ctrl, GFP_KERNEL);
	if (!phba->conn_table) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to allocate memory in "
			    "hba_setup_cid_tbls\n");

		kfree(phba->ep_array);
		phba->ep_array = NULL;
		ret = -ENOMEM;
		goto free_memory;
	}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;

		ptr_cid_info = phba->cid_array_info[ulp_num];
		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
			phba->phwi_ctrlr->wrb_context[i].cid;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			ptr_cid_info->cid_alloc = 0;
			ptr_cid_info->cid_free = 0;
		}
	}
	return 0;

free_memory:
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}

	return ret;
}
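
/*
 * The per-ULP CID tables set up above behave as simple circular
 * free-lists: cid_array[] is seeded from wrb_context[].cid in WRB-ring
 * order, avlbl_cids holds the depth, and cid_alloc/cid_free are the
 * producer/consumer indices (both reset to 0 once seeding is done)
 * that the CID alloc/free paths advance modulo the CID count.
 */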
static void hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);

	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : reg =x%08x addr=%p\n", reg, addr);
		iowrite32(reg, addr);
	}

	if (!phba->msix_enabled) {
		eq = &phwi_context->be_eq[0].q;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eq->id=%d\n", eq->id);

		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}
static void hwi_disable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);

	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
}
static int beiscsi_init_port(struct beiscsi_hba *phba)
{
	int ret;

	ret = beiscsi_init_controller(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - Failed in"
			    "beiscsi_init_controller\n");
		return ret;
	}
	ret = beiscsi_init_sgl_handle(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - Failed in"
			    "beiscsi_init_sgl_handle\n");
		goto do_cleanup_ctrlr;
	}

	ret = hba_setup_cid_tbls(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed in hba_setup_cid_tbls\n");
		kfree(phba->io_sgl_hndl_base);
		kfree(phba->eh_sgl_hndl_base);
		goto do_cleanup_ctrlr;
	}
	return ret;

do_cleanup_ctrlr:
	hwi_cleanup_port(phba);
	return ret;
}
static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
{
	struct ulp_cid_info *ptr_cid_info = NULL;
	int ulp_num;

	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->ep_array);
	kfree(phba->conn_table);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}
}
/**
 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
 * @beiscsi_conn: ptr to the conn to be cleaned up
 * @task: ptr to iscsi_task resource to be freed.
 *
 * Free driver mgmt resources bound to CXN.
 **/
static void
beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
				struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	io_task = task->dd_data;

	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->psgl_handle) {
		free_mgmt_sgl_handle(phba, io_task->psgl_handle);
		io_task->psgl_handle = NULL;
	}

	if (io_task->mtask_addr) {
		pci_unmap_single(phba->pcidev,
				 io_task->mtask_addr,
				 io_task->mtask_data_count,
				 PCI_DMA_TODEVICE);
		io_task->mtask_addr = 0;
	}
}
/**
 * beiscsi_cleanup_task()- Free driver resources of the task
 * @task: ptr to the iscsi task
 *
 **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
		task->hdr = NULL;
	}

	if (task->sc) {
		if (io_task->pwrb_handle) {
			free_wrb_handle(phba, pwrb_context,
					io_task->pwrb_handle);
			io_task->pwrb_handle = NULL;
		}

		if (io_task->psgl_handle) {
			free_io_sgl_handle(phba, io_task->psgl_handle);
			io_task->psgl_handle = NULL;
		}

		if (io_task->scsi_cmnd) {
			if (io_task->num_sg)
				scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}
	} else {
		if (!beiscsi_conn->login_in_progress)
			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
	}
}
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct hwi_wrb_context *pwrb_context = NULL;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	beiscsi_conn->login_in_progress = 0;
	spin_lock_bh(&session->back_lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->back_lock);

	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
				       &pwrb_context);

	/* Check for the adapter family */
	if (is_chip_be2_be3r(phba))
		beiscsi_offload_cxn_v0(params, pwrb_handle,
				       phba->init_mem,
				       pwrb_context);
	else
		beiscsi_offload_cxn_v2(params, pwrb_handle,
				       pwrb_context);

	be_dws_le_to_cpu(pwrb_handle->pwrb,
			 sizeof(struct iscsi_target_context_update_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);

	/*
	 * There is no completion for CONTEXT_UPDATE. The completion of next
	 * WRB posted guarantees FW's processing and DMA'ing of it.
	 * Use beiscsi_put_wrb_handle to put it back in the pool which makes
	 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
	 */
	beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = (int)itt;
	if (age)
		*age = conn->session->age;
}
/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	uint16_t cri_index = 0;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	io_task->psgl_handle = NULL;
	io_task->pwrb_handle = NULL;

	if (task->sc) {
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		if (!io_task->psgl_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of IO_SGL_ICD Failed"
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_hndls;
		}
		io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
		if (!io_task->pwrb_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of WRB_HANDLE Failed"
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_io_hndls;
		}
	} else {
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			beiscsi_conn->task = task;
			if (!beiscsi_conn->login_in_progress) {
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				if (!io_task->psgl_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_hndls;
				}

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
				io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
				if (!io_task->pwrb_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of WRB_HANDLE Failed"
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_mgmt_hndls;
				}
				beiscsi_conn->plogin_wrb_handle =
							io_task->pwrb_handle;

			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
				io_task->pwrb_handle =
						beiscsi_conn->plogin_wrb_handle;
			}
		} else {
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			if (!io_task->psgl_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO |
					    BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
					    "for the CID : %d\n",
					    beiscsi_conn->beiscsi_conn_cid);
				goto free_hndls;
			}
			io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
			if (!io_task->pwrb_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of WRB_HANDLE Failed"
					    "for the CID : %d\n",
					    beiscsi_conn->beiscsi_conn_cid);
				goto free_mgmt_hndls;
			}

		}
	}
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				 wrb_index << 16) | (unsigned int)
				(io_task->psgl_handle->sgl_index));
	io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_io_hndls:
	free_io_sgl_handle(phba, io_task->psgl_handle);
	goto free_hndls;
free_mgmt_hndls:
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	io_task->psgl_handle = NULL;
free_hndls:
	phwi_ctrlr = phba->phwi_ctrlr;
	cri_index = BE_GET_CRI_FROM_CID(
			beiscsi_conn->beiscsi_conn_cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	if (io_task->pwrb_handle)
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	io_task->cmd_bhs = NULL;
	return -ENOMEM;
}
static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
			     unsigned int num_sg, unsigned int xferlen,
			     unsigned int writedir)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;

	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
	}

	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) <<
		     DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}

	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;
	unsigned int pwrb_typeoffset = 0;
	int ret = 0;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
			      task->data_count);
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
			      task->data_count);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
	}

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 1);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 1);
		} else {
			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 0);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 0);
		}
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	default:
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
			    "BM_%d : opcode =%d Not supported\n",
			    task->hdr->opcode & ISCSI_OPCODE_MASK);

		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Set the task type */
	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}
static int beiscsi_task_xmit(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct beiscsi_hba *phba;
	struct scatterlist *sg;
	int num_sg;
	unsigned int writedir = 0, xferlen = 0;

	phba = io_task->conn->phba;
	/**
	 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
	 * operational if FW still gets heartbeat from EP FW. Is management
	 * path really needed to continue further?
	 */
	if (!beiscsi_hba_is_online(phba))
		return -EIO;

	if (!io_task->conn->login_in_progress)
		task->hdr->exp_statsn = 0;

	if (!sc)
		return beiscsi_mtask(task);

	io_task->scsi_cmnd = sc;
	io_task->num_sg = 0;
	num_sg = scsi_dma_map(sc);
	if (num_sg < 0) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
			    "BM_%d : scsi_dma_map Failed "
			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
			    io_task->libiscsi_itt, scsi_bufflen(sc));

		return num_sg;
	}
	/**
	 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
	 * For management task, cleanup_task checks mtask_addr before unmapping.
	 */
	io_task->num_sg = num_sg;
	xferlen = scsi_bufflen(sc);
	sg = scsi_sglist(sc);
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		writedir = 1;
	else
		writedir = 0;

	return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
 * beiscsi_bsg_request - handle bsg request from ISCSI transport
 * @job: job to handle
 */
static int beiscsi_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost;
	struct beiscsi_hba *phba;
	struct iscsi_bsg_request *bsg_req = job->request;
	int rc = -EINVAL;
	unsigned int tag;
	struct be_dma_mem nonemb_cmd;
	struct be_cmd_resp_hdr *resp;
	struct iscsi_bsg_reply *bsg_reply = job->reply;
	unsigned short status, extd_status;

	shost = iscsi_job_to_shost(job);
	phba = iscsi_host_priv(shost);

	if (!beiscsi_hba_is_online(phba)) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : HBA in error 0x%lx\n", phba->state);
		return -ENXIO;
	}

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
					job->request_payload.payload_len,
					&nonemb_cmd.dma);
		if (nonemb_cmd.va == NULL) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : Failed to allocate memory for "
				    "beiscsi_bsg_request\n");
			return -ENOMEM;
		}
		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
						  &nonemb_cmd);
		if (!tag) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : MBX Tag Allocation Failed\n");

			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
					    nonemb_cmd.va, nonemb_cmd.dma);
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(
					phba->ctrl.mcc_wait[tag],
					phba->ctrl.mcc_tag_status[tag],
					msecs_to_jiffies(
					BEISCSI_HOST_MBX_TIMEOUT));

		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
			clear_bit(MCC_TAG_STATE_RUNNING,
				  &phba->ctrl.ptag_state[tag].tag_state);
			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
					    nonemb_cmd.va, nonemb_cmd.dma);
			return -EIO;
		}
		extd_status = (phba->ctrl.mcc_tag_status[tag] &
			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
		free_mcc_wrb(&phba->ctrl, tag);
		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    nonemb_cmd.va, (resp->response_length
				    + sizeof(*resp)));
		bsg_reply->reply_payload_rcv_len = resp->response_length;
		bsg_reply->result = status;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		if (status || extd_status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : MBX Cmd Failed"
				    " status = %d extd_status = %d\n",
				    status, extd_status);

			return -EIO;
		} else {
			rc = 0;
		}
		break;

	default:
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				"BM_%d : Unsupported bsg command: 0x%x\n",
				bsg_req->msgcode);
		break;
	}

	return rc;
}
static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
	/* Set the logging parameter */
	beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
{
	if (phba->boot_struct.boot_kset)
		return;

	/* skip if boot work is already in progress */
	if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
		return;

	phba->boot_struct.retry = 3;
	phba->boot_struct.tag = 0;
	phba->boot_struct.s_handle = s_handle;
	phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
	schedule_work(&phba->boot_work);
}
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
			rc = sprintf(buf, "%pI4\n",
				(char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				(char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	}
	return rc;
}
static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n",
			     phba->boot_struct.boot_sess.initiator_iscsiname);
		break;
	}
	return rc;
}
static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	}
	return rc;
}
static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	}
	return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	}
	return rc;
}
static void beiscsi_boot_kobj_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}

static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
{
	struct boot_struct *bs = &phba->boot_struct;
	struct iscsi_boot_kobj *boot_kobj;

	if (bs->boot_kset) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d: boot_kset already created\n");
		return 0;
	}

	bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!bs->boot_kset) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d: boot_kset alloc failed\n");
		return -ENOMEM;
	}

	/* get shost ref because the show function will refer phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	return 0;

put_shost:
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(bs->boot_kset);
	bs->boot_kset = NULL;
	return -ENOMEM;
}
static void beiscsi_boot_work(struct work_struct *work)
{
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba, boot_work);
	struct boot_struct *bs = &phba->boot_struct;
	unsigned int tag = 0;

	if (!beiscsi_hba_is_online(phba))
		return;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BM_%d : %s action %d\n",
		    __func__, phba->boot_struct.action);

	switch (phba->boot_struct.action) {
	case BEISCSI_BOOT_REOPEN_SESS:
		tag = beiscsi_boot_reopen_sess(phba);
		break;
	case BEISCSI_BOOT_GET_SHANDLE:
		tag = __beiscsi_boot_get_shandle(phba, 1);
		break;
	case BEISCSI_BOOT_GET_SINFO:
		tag = beiscsi_boot_get_sinfo(phba);
		break;
	case BEISCSI_BOOT_LOGOUT_SESS:
		tag = beiscsi_boot_logout_sess(phba);
		break;
	case BEISCSI_BOOT_CREATE_KSET:
		beiscsi_boot_create_kset(phba);
		/**
		 * updated boot_kset is made visible to all before
		 * ending the boot work.
		 */
		mb();
		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
		return;
	}
	if (!tag) {
		if (bs->retry--)
			schedule_work(&phba->boot_work);
		else
			clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
	}
}
static void beiscsi_eqd_update_work(struct work_struct *work)
{
	struct hwi_context_memory *phwi_context;
	struct be_set_eqd set_eqd[MAX_CPUS];
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;
	unsigned int pps, delta;
	struct be_aic_obj *aic;
	int eqd, i, num = 0;
	unsigned long now;

	phba = container_of(work, struct beiscsi_hba, eqd_update.work);
	if (!beiscsi_hba_is_online(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i <= phba->num_cpus; i++) {
		aic = &phba->aic_obj[i];
		pbe_eq = &phwi_context->be_eq[i];
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    pbe_eq->cq_count < aic->eq_prev) {
			aic->jiffies = now;
			aic->eq_prev = pbe_eq->cq_count;
			continue;
		}
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
		eqd = (pps / 1500) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, phwi_context->max_eqd);
		eqd = max_t(u32, eqd, phwi_context->min_eqd);

		aic->jiffies = now;
		aic->eq_prev = pbe_eq->cq_count;

		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = pbe_eq->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}
	if (num)
		/* completion of this is ignored */
		beiscsi_modify_eq_delay(phba, set_eqd, num);

	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
{
	int i, status;

	for (i = 0; i <= phba->num_cpus; i++)
		phba->msix_entries[i].entry = i;

	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
				       phba->num_cpus + 1, phba->num_cpus + 1);
	if (status > 0)
		phba->msix_enabled = true;
}
static void beiscsi_hw_tpe_check(unsigned long ptr)
{
	struct beiscsi_hba *phba;
	u32 wait;

	phba = (struct beiscsi_hba *)ptr;
	/* if not TPE, do nothing */
	if (!beiscsi_detect_tpe(phba))
		return;

	/* wait default 4000ms before recovering */
	wait = 4000;
	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
	queue_delayed_work(phba->wq, &phba->recover_port,
			   msecs_to_jiffies(wait));
}
static void beiscsi_hw_health_check(unsigned long ptr)
{
	struct beiscsi_hba *phba;

	phba = (struct beiscsi_hba *)ptr;
	beiscsi_detect_ue(phba);
	if (beiscsi_detect_ue(phba)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : port in error: %lx\n", phba->state);
		/* sessions are no longer valid, so first fail the sessions */
		queue_work(phba->wq, &phba->sess_work);

		/* detect UER supported */
		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
			return;
		/* modify this timer to check TPE */
		phba->hw_check.function = beiscsi_hw_tpe_check;
	}

	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
}
/**
 * beiscsi_enable_port()- Enables the disabled port.
 * Only port resources freed in disable function are reallocated.
 * This is called in HBA error handling path.
 *
 * @phba: Instance of driver private structure
 *
 **/
static int beiscsi_enable_port(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	int ret, i;

	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : %s : port is online %lx\n",
			      __func__, phba->state);
		return 0;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		return ret;

	if (enable_msix)
		find_num_cpus(phba);
	else
		phba->num_cpus = 1;
	if (enable_msix) {
		beiscsi_msix_enable(phba);
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}

	beiscsi_get_params(phba);
	/* Re-enable UER. If different TPE occurs then it is recoverable. */
	beiscsi_set_uer_feature(phba);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = hwi_init_controller(phba);
	if (ret) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : init controller failed %d\n", ret);
		goto disable_msix;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : setup IRQs failed %d\n", ret);
		goto cleanup_port;
	}
	hwi_enable_intr(phba);
	/* port operational: clear all error bits */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	/* start hw_check timer and eqd_update work */
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	/**
	 * Timer function gets modified for TPE detection.
	 * Always reinit to do health check first.
	 */
	phba->hw_check.function = beiscsi_hw_health_check;
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	return 0;

cleanup_port:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	hwi_cleanup_port(phba);

disable_msix:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);

	return ret;
}
/*
 * beiscsi_disable_port()- Disable port and cleanup driver resources.
 * This is called in HBA error handling and driver removal.
 * @phba: Instance Priv structure
 * @unload: indicate driver is unloading
 *
 * Free the OS and HW resources held by the driver
 */
static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	cancel_delayed_work_sync(&phba->eqd_update);
	cancel_work_sync(&phba->boot_work);
	/* WQ might be running cancel queued mcc_work if we are not exiting */
	if (!unload && beiscsi_hba_in_error(phba)) {
		pbe_eq = &phwi_context->be_eq[i];
		cancel_work_sync(&pbe_eq->mcc_work);
	}
	hwi_cleanup_port(phba);
}
static void beiscsi_sess_work(struct work_struct *work)
{
	struct beiscsi_hba *phba;

	phba = container_of(work, struct beiscsi_hba, sess_work);
	/*
	 * This work gets scheduled only in case of HBA error.
	 * Old sessions are gone so need to be re-established.
	 * iscsi_session_failure needs process context hence this work.
	 */
	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
}

static void beiscsi_recover_port(struct work_struct *work)
{
	struct beiscsi_hba *phba;

	phba = container_of(work, struct beiscsi_hba, recover_port.work);
	beiscsi_disable_port(phba, 0);
	beiscsi_enable_port(phba);
}
static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH error detected\n");

	/* first stop UE detection when PCI error detected */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);

	/* sessions are no longer valid, so first fail the sessions */
	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
	beiscsi_disable_port(phba, 0);

	if (state == pci_channel_io_perm_failure) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : EEH : State PERM Failure");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 **/
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba = NULL;
	int status = 0;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH Reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = beiscsi_check_fw_rdy(phba);
	if (status) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completed\n");
	} else {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completion Failure\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
static void beiscsi_eeh_resume(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba;
	int ret;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	pci_save_state(pdev);

	ret = beiscsi_enable_port(phba);
	if (ret)
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : AER EEH resume failed\n");
}
static int beiscsi_dev_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int s_handle;
	int ret, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
		ret = -ENOMEM;
		goto disable_pci;
	}

	/* Enable EEH reporting */
	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : PCIe Error Reporting "
			    "Enabling Failed\n");

	pci_save_state(pcidev);

	/* Initialize Driver configuration Parameters */
	beiscsi_hba_attrs_init(phba);

	phba->mac_addr_set = false;

	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case OC_SKH_ID1:
		phba->generation = BE_GEN4;
		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_ctrl_init failed\n");
		goto hba_free;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		goto hba_free;

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
	beiscsi_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);
	beiscsi_set_uer_feature(phba);

	if (enable_msix)
		find_num_cpus(phba);
	else
		phba->num_cpus = 1;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_blkenbld;

	/* set online bit after port is operational */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
	ret = beiscsi_boot_get_shandle(phba, &s_handle);
	if (ret > 0) {
		beiscsi_start_boot_work(phba, s_handle);
		/**
		 * Set this bit after starting the work to let
		 * probe handle it first.
		 * ASYNC event can too schedule this work.
		 */
		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
	}

	beiscsi_iface_create_default(phba);
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
	/**
	 * Start UE detection here. UE before this will cause stall in probe
	 * and eventually fail the probe.
	 */
	init_timer(&phba->hw_check);
	phba->hw_check.function = beiscsi_hw_health_check;
	phba->hw_check.data = (unsigned long)phba;
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost);
	beiscsi_disable_port(phba, 1);

	/* after cancelling boot_work */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}
static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};
static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
			iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);