/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
		 "contiguous memory that can be allocated. "
		 "Range is 16 - 128");

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

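/**
 * beiscsi_eh_abort - SCSI eh_abort_handler entry point
 * @sc: SCSI command to be aborted
 *
 * Builds a single-entry invalidate table for the command's ICD, asks
 * the firmware (via an MCC request) to invalidate it, waits for that
 * request to complete and then lets libiscsi finish the abort.
 */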
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for "
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

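/**
 * beiscsi_eh_device_reset - SCSI eh_device_reset_handler entry point
 * @sc: SCSI command that triggered the LUN reset
 *
 * Collects every active task on the target LUN into the invalidate
 * table, posts one MCC invalidate request covering all of them, and
 * then defers the reset itself to libiscsi's iscsi_eh_device_reset().
 */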
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc = FAILED;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
		goto unlock;

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for "
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
unlock:
	spin_unlock_bh(&session->lock);
	return rc;
}

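/**
 * beiscsi_show_boot_tgt_info - show boot target attributes
 * @data: Pointer to our beiscsi_hba
 * @type: ISCSI_BOOT_TGT_* attribute being read
 * @buf: sysfs output buffer
 *
 * Formats one attribute of the firmware-discovered boot target for
 * the iscsi_boot_sysfs interface.
 */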
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.ip_address);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.ip_address);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(buf, phba);
		if (rc < 0) {
			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
			return rc;
		}
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

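/**
 * beiscsi_hba_alloc - allocate and register the Scsi_Host
 * @pcidev: PCI function being driven
 *
 * Allocates the iSCSI host with a beiscsi_hba as its private data,
 * fills in the SCSI limits and registers the host with the midlayer.
 * Returns the new beiscsi_hba, or NULL on failure.
 */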
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

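/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and config BARs
 * @phba: Driver priv structure
 * @pcidev: PCI function being mapped
 *
 * BAR 2 holds the CSRs and BAR 4 the doorbells; the PCI config BAR is
 * 1 on BE gen2 parts and 0 otherwise. All mappings made so far are
 * undone if any ioremap fails.
 */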
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

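/**
 * be_ctrl_init - set up the mailbox used to talk to the firmware
 * @phba: Driver priv structure
 * @pdev: PCI function being initialized
 *
 * Maps the PCI BARs, allocates DMA-coherent space for the MCC mailbox
 * (the usable pointer is aligned to 16 bytes via PTR_ALIGN below) and
 * initializes the mailbox and MCC locks.
 */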
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				      ? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}

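/*
 * hwi_ring_eq_db - ring an event-queue doorbell. The 32-bit doorbell
 * value packs the EQ id, the rearm/clear-interrupt/event flags and the
 * number of entries popped (see the DB_EQ_* shifts and masks used
 * below), and is written to the DB_EQ_OFFSET doorbell register.
 */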
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver, for the MCC event queue.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The per-EQ MSI-X isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}

/**
 * be_isr - The isr routine of the driver, for legacy (INTx) interrupts.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					     "beiscsi_init_irqs-Failed to "
					     "register msix for i = %d\n", i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

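/*
 * hwi_ring_cq_db - ring a completion-queue doorbell, analogous to
 * hwi_ring_eq_db(): the CQ id, an optional rearm flag and the number
 * of processed entries are packed into one 32-bit write to
 * DB_CQ_OFFSET.
 */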
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

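/*
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi.
 * Only NOP-In, async events, rejects and login/text responses are
 * expected here; anything else is logged and dropped (return 1).
 * Returns 0 once the PDU has been completed through
 * __iscsi_complete_pdu() under the session lock.
 */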
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		SE_DEBUG(DBG_LVL_8,
			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
			 phba->io_sgl_alloc_index);
		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in IO SGL io_sgl_free_index=%d, "
			 "value there=%p\n", phba->io_sgl_free_index,
			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
				(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x "
		 "wrb_handles_available=%d\n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
			     phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
			 phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

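/*
 * be_complete_io - complete a SCSI command from a solicited CQE.
 * Decodes the response, status, flags and CmdSN window from the CQE,
 * handles under/overflow residuals and sense data, then finishes the
 * command via iscsi_complete_scsi_task().
 */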
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = cpu_to_be16(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
				    ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				    / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
				     ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				     / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
				 WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, psol);
		else
			be_complete_tmf(beiscsi_conn, task, psol);

		break;

	case HWH_TYPE_LOGIN:
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 "- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_complete_cmd, unknown type = %d "
			     "wrb_index 0x%x CID 0x%x\n", type,
			     ((psol->dw[offsetof(struct amap_iscsi_wrb,
			     type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			     ((psol->dw[offsetof(struct amap_sol_cqe,
			     cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

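/*
 * hwi_get_async_handle - map a default-PDU CQE back to the
 * async_pdu_handle that owns the buffer. The buffer index is
 * recovered from the DMA address carried in the CQE (its offset from
 * the header or data ring base, divided by the per-buffer size), and
 * the matching handle is looked up on the busy list for that ring
 * slot.
 */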
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			     "Unexpected code=%d\n",
			     pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			     code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}

static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
				       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;
	unsigned int i = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (i == 0) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
			i++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
			i++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
	return 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

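/*
 * hwi_post_async_buffers - replenish the default PDU header or data
 * ring. Free handles are moved onto the busy lists in multiples of 8
 * (the count posted through the doorbell's CQPROC field), their DMA
 * addresses are written into the ring SGEs, and the RXULP doorbell is
 * rung to hand the buffers back to hardware.
 */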
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}

static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset = buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					    phdr, hdr_len, pfirst_buffer,
					    buf_len);

	if (status == 0)
		hwi_free_async_msg(phba, cri);
	return 0;
}

static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
				bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}

static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

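/*
 * beiscsi_process_mcc_isr - drain the MCC completion queue: async
 * link-state events are dispatched to
 * beiscsi_async_link_state_process(), command completions are handed
 * to be_mcc_compl_process_isr(), and the CQ doorbell is rung (with
 * rearm) once the queue is empty.
 */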
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 " Unsupported Async Event, flags"
					 " = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}

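/**
 * beiscsi_process_cq - walk one I/O completion queue
 * @pbe_eq: Event queue object whose CQ is to be drained
 *
 * Dispatches each valid CQE by its completion code: solicited command
 * completions, driver messages, unsolicited header/data PDUs, and the
 * various connection and command error codes (most of which tear the
 * connection down via iscsi_conn_failure()). Returns the total number
 * of entries processed.
 */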
1819 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1821 struct be_queue_info *cq;
1822 struct sol_cqe *sol;
1823 struct dmsg_cqe *dmsg;
1824 unsigned int num_processed = 0;
1825 unsigned int tot_nump = 0;
1826 struct beiscsi_conn *beiscsi_conn;
1827 struct beiscsi_endpoint *beiscsi_ep;
1828 struct iscsi_endpoint *ep;
1829 struct beiscsi_hba *phba;
1831 cq = pbe_eq->cq;
1832 sol = queue_tail_node(cq);
1833 phba = pbe_eq->phba;
1835 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1836 CQE_VALID_MASK) {
1837 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1839 ep = phba->ep_array[(u32) ((sol->
1840 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1841 SOL_CID_MASK) >> 6) -
1842 phba->fw_config.iscsi_cid_start];
1844 beiscsi_ep = ep->dd_data;
1845 beiscsi_conn = beiscsi_ep->conn;
1847 if (num_processed >= 32) {
1848 hwi_ring_cq_db(phba, cq->id,
1849 num_processed, 0, 0);
1850 tot_nump += num_processed;
1851 num_processed = 0;
1854 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1855 32] & CQE_CODE_MASK) {
1856 case SOL_CMD_COMPLETE:
1857 hwi_complete_cmd(beiscsi_conn, phba, sol);
1858 break;
1859 case DRIVERMSG_NOTIFY:
1860 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1861 dmsg = (struct dmsg_cqe *)sol;
1862 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1863 break;
1864 case UNSOL_HDR_NOTIFY:
1865 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
1866 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1867 (struct i_t_dpdu_cqe *)sol);
1868 break;
1869 case UNSOL_DATA_NOTIFY:
1870 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1871 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1872 (struct i_t_dpdu_cqe *)sol);
1873 break;
1874 case CXN_INVALIDATE_INDEX_NOTIFY:
1875 case CMD_INVALIDATED_NOTIFY:
1876 case CXN_INVALIDATE_NOTIFY:
1877 SE_DEBUG(DBG_LVL_1,
1878 "Ignoring CQ Error notification for cmd/cxn"
1879 "invalidate\n");
1880 break;
1881 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1882 case CMD_KILLED_INVALID_STATSN_RCVD:
1883 case CMD_KILLED_INVALID_R2T_RCVD:
1884 case CMD_CXN_KILLED_LUN_INVALID:
1885 case CMD_CXN_KILLED_ICD_INVALID:
1886 case CMD_CXN_KILLED_ITT_INVALID:
1887 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1888 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1889 SE_DEBUG(DBG_LVL_1,
1890 "CQ Error notification for cmd.. "
1891 "code %d cid 0x%x\n",
1892 sol->dw[offsetof(struct amap_sol_cqe, code) /
1893 32] & CQE_CODE_MASK,
1894 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1895 32] & SOL_CID_MASK));
1896 break;
1897 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1898 SE_DEBUG(DBG_LVL_1,
1899 "Digest error on def pdu ring, dropping..\n");
1900 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1901 (struct i_t_dpdu_cqe *) sol);
1902 break;
1903 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1904 case CXN_KILLED_BURST_LEN_MISMATCH:
1905 case CXN_KILLED_AHS_RCVD:
1906 case CXN_KILLED_HDR_DIGEST_ERR:
1907 case CXN_KILLED_UNKNOWN_HDR:
1908 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1909 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1910 case CXN_KILLED_TIMED_OUT:
1911 case CXN_KILLED_FIN_RCVD:
1912 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1913 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1914 case CXN_KILLED_OVER_RUN_RESIDUAL:
1915 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1916 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1917 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1918 "0x%x...\n",
1919 sol->dw[offsetof(struct amap_sol_cqe, code) /
1920 32] & CQE_CODE_MASK,
1921 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1922 32] & CQE_CID_MASK));
1923 iscsi_conn_failure(beiscsi_conn->conn,
1924 ISCSI_ERR_CONN_FAILED);
1925 break;
1926 case CXN_KILLED_RST_SENT:
1927 case CXN_KILLED_RST_RCVD:
1928 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1929 "received/sent on CID 0x%x...\n",
1930 sol->dw[offsetof(struct amap_sol_cqe, code) /
1931 32] & CQE_CODE_MASK,
1932 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1933 32] & CQE_CID_MASK));
1934 iscsi_conn_failure(beiscsi_conn->conn,
1935 ISCSI_ERR_CONN_FAILED);
1936 break;
1937 default:
1938 SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code = %d "
1939 "received on CID 0x%x...\n",
1940 sol->dw[offsetof(struct amap_sol_cqe, code) /
1941 32] & CQE_CODE_MASK,
1942 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1943 32] & CQE_CID_MASK));
1944 break;
1947 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1948 queue_tail_inc(cq);
1949 sol = queue_tail_node(cq);
1950 num_processed++;
1953 if (num_processed > 0) {
1954 tot_nump += num_processed;
1955 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1957 return tot_nump;
1960 void beiscsi_process_all_cqs(struct work_struct *work)
1962 unsigned long flags;
1963 struct hwi_controller *phwi_ctrlr;
1964 struct hwi_context_memory *phwi_context;
1965 struct be_eq_obj *pbe_eq;
1966 struct beiscsi_hba *phba =
1967 container_of(work, struct beiscsi_hba, work_cqs);
1969 phwi_ctrlr = phba->phwi_ctrlr;
1970 phwi_context = phwi_ctrlr->phwi_ctxt;
1971 if (phba->msix_enabled)
1972 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1973 else
1974 pbe_eq = &phwi_context->be_eq[0];
1976 if (phba->todo_mcc_cq) {
1977 spin_lock_irqsave(&phba->isr_lock, flags);
1978 phba->todo_mcc_cq = 0;
1979 spin_unlock_irqrestore(&phba->isr_lock, flags);
1980 beiscsi_process_mcc_isr(phba);
1983 if (phba->todo_cq) {
1984 spin_lock_irqsave(&phba->isr_lock, flags);
1985 phba->todo_cq = 0;
1986 spin_unlock_irqrestore(&phba->isr_lock, flags);
1987 beiscsi_process_cq(pbe_eq);
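/*
 * blk_iopoll handler: process this EQ's CQ and, if fewer entries than
 * the budget were consumed, complete the poll and re-arm the EQ
 * doorbell so interrupts resume.
 */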
1991 static int be_iopoll(struct blk_iopoll *iop, int budget)
1993 unsigned int ret; /* not 'static': each EQ's iopoll may run concurrently */
1994 struct beiscsi_hba *phba;
1995 struct be_eq_obj *pbe_eq;
1997 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1998 ret = beiscsi_process_cq(pbe_eq);
1999 if (ret < budget) {
2000 phba = pbe_eq->phba;
2001 blk_iopoll_complete(iop);
2002 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
2003 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2005 return ret;
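/*
 * Populate a WRB and its SGL from a DMA-mapped scatterlist. The first
 * two elements are mirrored into the WRB's inline sge0/sge1 slots; the
 * full list is then written to the task's SGL fragment with the last
 * entry flagged via last_sge.
 */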
2008 static void
2009 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2010 unsigned int num_sg, struct beiscsi_io_task *io_task)
2012 struct iscsi_sge *psgl;
2013 unsigned int sg_len, index;
2014 unsigned int sge_len = 0;
2015 unsigned long long addr;
2016 struct scatterlist *l_sg;
2017 unsigned int offset;
2019 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2020 io_task->bhs_pa.u.a32.address_lo);
2021 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2022 io_task->bhs_pa.u.a32.address_hi);
2024 l_sg = sg;
2025 for (index = 0; (index < num_sg) && (index < 2); index++,
2026 sg = sg_next(sg)) {
2027 if (index == 0) {
2028 sg_len = sg_dma_len(sg);
2029 addr = (u64) sg_dma_address(sg);
2030 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2031 ((u32)(addr & 0xFFFFFFFF)));
2032 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2033 ((u32)(addr >> 32)));
2034 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2035 sg_len);
2036 sge_len = sg_len;
2037 } else {
2038 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2039 pwrb, sge_len);
2040 sg_len = sg_dma_len(sg);
2041 addr = (u64) sg_dma_address(sg);
2042 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2043 ((u32)(addr & 0xFFFFFFFF)));
2044 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2045 ((u32)(addr >> 32)));
2046 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2047 sg_len);
2050 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2051 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2053 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2055 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2056 io_task->bhs_pa.u.a32.address_hi);
2057 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2058 io_task->bhs_pa.u.a32.address_lo);
2060 if (num_sg == 1) {
2061 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2063 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
2065 } else if (num_sg == 2) {
2066 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
2068 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
2070 } else {
2071 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
2073 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
2076 sg = l_sg;
2077 psgl++;
2078 psgl++;
2079 offset = 0;
2080 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2081 sg_len = sg_dma_len(sg);
2082 addr = (u64) sg_dma_address(sg);
2083 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2084 (addr & 0xFFFFFFFF));
2085 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2086 (addr >> 32));
2087 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2088 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2089 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2090 offset += sg_len;
2092 psgl--;
2093 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
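/*
 * Simpler variant for non-I/O (login/mgmt) tasks: the BHS is wired into
 * the WRB and, when task->data is present, it is mapped with
 * pci_map_single() and described by a single data SGE.
 */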
2096 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2098 struct iscsi_sge *psgl;
2099 unsigned long long addr;
2100 struct beiscsi_io_task *io_task = task->dd_data;
2101 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2102 struct beiscsi_hba *phba = beiscsi_conn->phba;
2104 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2105 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2106 io_task->bhs_pa.u.a32.address_lo);
2107 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2108 io_task->bhs_pa.u.a32.address_hi);
2110 if (task->data) {
2111 if (task->data_count) {
2112 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2113 addr = (u64) pci_map_single(phba->pcidev,
2114 task->data,
2115 task->data_count, PCI_DMA_TODEVICE);
2116 } else {
2117 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2118 addr = 0;
2120 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2121 ((u32)(addr & 0xFFFFFFFF)));
2122 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2123 ((u32)(addr >> 32)));
2124 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2125 task->data_count);
2127 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2128 } else {
2129 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2130 addr = 0;
2133 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2135 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2137 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2138 io_task->bhs_pa.u.a32.address_hi);
2139 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2140 io_task->bhs_pa.u.a32.address_lo);
2141 if (task->data) {
2142 psgl++;
2143 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2144 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2145 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2146 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2147 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2148 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2150 psgl++;
2151 if (task->data) {
2152 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2153 ((u32)(addr & 0xFFFFFFFF)));
2154 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2155 ((u32)(addr >> 32)));
2157 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2159 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
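/*
 * Fill phba->mem_req[] with the byte count each memory region
 * (HWI_MEM_xxx and ISCSI_MEM_GLOBAL_HEADER) will need, based on the
 * hba_parameters; beiscsi_alloc_mem() allocates from this table.
 */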
2162 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2164 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2165 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2166 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2168 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2169 sizeof(struct sol_cqe));
2170 num_async_pdu_buf_pages =
2171 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2172 phba->params.defpdu_hdr_sz);
2173 num_async_pdu_buf_sgl_pages =
2174 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2175 sizeof(struct phys_addr));
2176 num_async_pdu_data_pages =
2177 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2178 phba->params.defpdu_data_sz);
2179 num_async_pdu_data_sgl_pages =
2180 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2181 sizeof(struct phys_addr));
2183 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2185 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2186 BE_ISCSI_PDU_HEADER_SIZE;
2187 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2188 sizeof(struct hwi_context_memory);
2191 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2192 * (phba->params.wrbs_per_cxn)
2193 * phba->params.cxns_per_ctrl;
2194 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2195 (phba->params.wrbs_per_cxn);
2196 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2197 phba->params.cxns_per_ctrl);
2199 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2200 phba->params.icds_per_ctrl;
2201 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2202 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2204 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2205 num_async_pdu_buf_pages * PAGE_SIZE;
2206 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2207 num_async_pdu_data_pages * PAGE_SIZE;
2208 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2209 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2210 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2211 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2212 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2213 phba->params.asyncpdus_per_ctrl *
2214 sizeof(struct async_pdu_handle);
2215 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2216 phba->params.asyncpdus_per_ctrl *
2217 sizeof(struct async_pdu_handle);
2218 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2219 sizeof(struct hwi_async_pdu_context) +
2220 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
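/*
 * Allocate every region sized by beiscsi_find_mem_req() as one or more
 * DMA-coherent fragments. On failure the attempted chunk size is
 * halved (or rounded down to a power of two) until it falls below
 * BE_MIN_MEM_SIZE, at which point all prior allocations are unwound.
 */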
2223 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2225 struct be_mem_descriptor *mem_descr;
2226 dma_addr_t bus_add;
2227 struct mem_array *mem_arr, *mem_arr_orig;
2228 unsigned int i, j, alloc_size, curr_alloc_size;
2230 phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2231 if (!phba->phwi_ctrlr)
2232 return -ENOMEM;
2234 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2235 GFP_KERNEL);
2236 if (!phba->init_mem) {
2237 kfree(phba->phwi_ctrlr);
2238 return -ENOMEM;
2241 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2242 GFP_KERNEL);
2243 if (!mem_arr_orig) {
2244 kfree(phba->init_mem);
2245 kfree(phba->phwi_ctrlr);
2246 return -ENOMEM;
2249 mem_descr = phba->init_mem;
2250 for (i = 0; i < SE_MEM_MAX; i++) {
2251 j = 0;
2252 mem_arr = mem_arr_orig;
2253 alloc_size = phba->mem_req[i];
2254 memset(mem_arr, 0, sizeof(struct mem_array) *
2255 BEISCSI_MAX_FRAGS_INIT);
2256 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2257 do {
2258 mem_arr->virtual_address = pci_alloc_consistent(
2259 phba->pcidev,
2260 curr_alloc_size,
2261 &bus_add);
2262 if (!mem_arr->virtual_address) {
2263 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2264 goto free_mem;
2265 if (curr_alloc_size -
2266 rounddown_pow_of_two(curr_alloc_size))
2267 curr_alloc_size = rounddown_pow_of_two
2268 (curr_alloc_size);
2269 else
2270 curr_alloc_size = curr_alloc_size / 2;
2271 } else {
2272 mem_arr->bus_address.u.
2273 a64.address = (__u64) bus_add;
2274 mem_arr->size = curr_alloc_size;
2275 alloc_size -= curr_alloc_size;
2276 curr_alloc_size = min(be_max_phys_size *
2277 1024, alloc_size);
2278 j++;
2279 mem_arr++;
2281 } while (alloc_size);
2282 mem_descr->num_elements = j;
2283 mem_descr->size_in_bytes = phba->mem_req[i];
2284 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2285 GFP_KERNEL);
2286 if (!mem_descr->mem_array)
2287 goto free_mem;
2289 memcpy(mem_descr->mem_array, mem_arr_orig,
2290 sizeof(struct mem_array) * j);
2291 mem_descr++;
2293 kfree(mem_arr_orig);
2294 return 0;
2295 free_mem:
2296 mem_descr->num_elements = j;
2297 while ((i) || (j)) {
2298 for (j = mem_descr->num_elements; j > 0; j--) {
2299 pci_free_consistent(phba->pcidev,
2300 mem_descr->mem_array[j - 1].size,
2301 mem_descr->mem_array[j - 1].
2302 virtual_address,
2303 (unsigned long)mem_descr->
2304 mem_array[j - 1].
2305 bus_address.u.a64.address);
2307 if (i) {
2308 i--;
2309 kfree(mem_descr->mem_array);
2310 mem_descr--;
2313 kfree(mem_arr_orig);
2314 kfree(phba->init_mem);
2315 kfree(phba->phwi_ctrlr);
2316 return -ENOMEM;
2319 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2321 beiscsi_find_mem_req(phba);
2322 return beiscsi_alloc_mem(phba);
2325 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2327 struct pdu_data_out *pdata_out;
2328 struct pdu_nop_out *pnop_out;
2329 struct be_mem_descriptor *mem_descr;
2331 mem_descr = phba->init_mem;
2332 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2333 pdata_out =
2334 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2335 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2337 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2338 IIOC_SCSI_DATA);
2340 pnop_out =
2341 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2342 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2344 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2345 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2346 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2347 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
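/*
 * Carve HWI_MEM_WRBH into per-connection arrays of wrb_handles and then
 * point each handle at its iscsi_wrb inside HWI_MEM_WRB, hopping to the
 * next memory fragment whenever the current one runs out. Only the even
 * wrb_context slots are populated (index advances by 2 per connection).
 */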
2350 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2352 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2353 struct wrb_handle *pwrb_handle;
2354 struct hwi_controller *phwi_ctrlr;
2355 struct hwi_wrb_context *pwrb_context;
2356 struct iscsi_wrb *pwrb;
2357 unsigned int num_cxn_wrbh;
2358 unsigned int num_cxn_wrb, j, idx, index;
2360 mem_descr_wrbh = phba->init_mem;
2361 mem_descr_wrbh += HWI_MEM_WRBH;
2363 mem_descr_wrb = phba->init_mem;
2364 mem_descr_wrb += HWI_MEM_WRB;
2366 idx = 0;
2367 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2368 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2369 ((sizeof(struct wrb_handle)) *
2370 phba->params.wrbs_per_cxn));
2371 phwi_ctrlr = phba->phwi_ctrlr;
2373 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2374 pwrb_context = &phwi_ctrlr->wrb_context[index];
2375 pwrb_context->pwrb_handle_base =
2376 kzalloc(sizeof(struct wrb_handle *) *
2377 phba->params.wrbs_per_cxn, GFP_KERNEL);
2378 pwrb_context->pwrb_handle_basestd =
2379 kzalloc(sizeof(struct wrb_handle *) *
2380 phba->params.wrbs_per_cxn, GFP_KERNEL);
2381 if (num_cxn_wrbh) {
2382 pwrb_context->alloc_index = 0;
2383 pwrb_context->wrb_handles_available = 0;
2384 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2385 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2386 pwrb_context->pwrb_handle_basestd[j] =
2387 pwrb_handle;
2388 pwrb_context->wrb_handles_available++;
2389 pwrb_handle->wrb_index = j;
2390 pwrb_handle++;
2392 pwrb_context->free_index = 0;
2393 num_cxn_wrbh--;
2394 } else {
2395 idx++;
2396 pwrb_handle =
2397 mem_descr_wrbh->mem_array[idx].virtual_address;
2398 num_cxn_wrbh =
2399 ((mem_descr_wrbh->mem_array[idx].size) /
2400 ((sizeof(struct wrb_handle)) *
2401 phba->params.wrbs_per_cxn));
2402 pwrb_context->alloc_index = 0;
2403 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2404 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2405 pwrb_context->pwrb_handle_basestd[j] =
2406 pwrb_handle;
2407 pwrb_context->wrb_handles_available++;
2408 pwrb_handle->wrb_index = j;
2409 pwrb_handle++;
2411 pwrb_context->free_index = 0;
2412 num_cxn_wrbh--;
2415 idx = 0;
2416 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2417 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2418 ((sizeof(struct iscsi_wrb) *
2419 phba->params.wrbs_per_cxn));
2420 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2421 pwrb_context = &phwi_ctrlr->wrb_context[index];
2422 if (num_cxn_wrb) {
2423 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2424 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2425 pwrb_handle->pwrb = pwrb;
2426 pwrb++;
2428 num_cxn_wrb--;
2429 } else {
2430 idx++;
2431 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2432 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2433 ((sizeof(struct iscsi_wrb) *
2434 phba->params.wrbs_per_cxn));
2435 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2436 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2437 pwrb_handle->pwrb = pwrb;
2438 pwrb++;
2440 num_cxn_wrb--;
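/*
 * Build the unsolicited (default) PDU context: header and data buffer
 * pools, their rings and handle arrays are laid over the HWI_MEM_ASYNC
 * regions, and every handle is linked onto its free list with the
 * read/write pointers reset.
 */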
2445 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2447 struct hwi_controller *phwi_ctrlr;
2448 struct hba_parameters *p = &phba->params;
2449 struct hwi_async_pdu_context *pasync_ctx;
2450 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2451 unsigned int index;
2452 struct be_mem_descriptor *mem_descr;
2454 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2455 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2457 phwi_ctrlr = phba->phwi_ctrlr;
2458 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2459 mem_descr->mem_array[0].virtual_address;
2460 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2461 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2463 pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2464 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2465 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2466 pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2468 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2469 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2470 if (mem_descr->mem_array[0].virtual_address) {
2471 SE_DEBUG(DBG_LVL_8,
2472 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2473 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2474 } else
2475 shost_printk(KERN_WARNING, phba->shost,
2476 "No Virtual address\n");
2478 pasync_ctx->async_header.va_base =
2479 mem_descr->mem_array[0].virtual_address;
2481 pasync_ctx->async_header.pa_base.u.a64.address =
2482 mem_descr->mem_array[0].bus_address.u.a64.address;
2484 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2485 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2486 if (mem_descr->mem_array[0].virtual_address) {
2487 SE_DEBUG(DBG_LVL_8,
2488 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2489 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2490 } else
2491 shost_printk(KERN_WARNING, phba->shost,
2492 "No Virtual address\n");
2493 pasync_ctx->async_header.ring_base =
2494 mem_descr->mem_array[0].virtual_address;
2496 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2497 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2498 if (mem_descr->mem_array[0].virtual_address) {
2499 SE_DEBUG(DBG_LVL_8,
2500 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2501 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2502 } else
2503 shost_printk(KERN_WARNING, phba->shost,
2504 "No Virtual address\n");
2506 pasync_ctx->async_header.handle_base =
2507 mem_descr->mem_array[0].virtual_address;
2508 pasync_ctx->async_header.writables = 0;
2509 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2511 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2512 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2513 if (mem_descr->mem_array[0].virtual_address) {
2514 SE_DEBUG(DBG_LVL_8,
2515 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2516 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2517 } else
2518 shost_printk(KERN_WARNING, phba->shost,
2519 "No Virtual address\n");
2520 pasync_ctx->async_data.va_base =
2521 mem_descr->mem_array[0].virtual_address;
2522 pasync_ctx->async_data.pa_base.u.a64.address =
2523 mem_descr->mem_array[0].bus_address.u.a64.address;
2525 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2526 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2527 if (mem_descr->mem_array[0].virtual_address) {
2528 SE_DEBUG(DBG_LVL_8,
2529 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2530 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2531 } else
2532 shost_printk(KERN_WARNING, phba->shost,
2533 "No Virtual address\n");
2535 pasync_ctx->async_data.ring_base =
2536 mem_descr->mem_array[0].virtual_address;
2538 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2539 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2540 if (!mem_descr->mem_array[0].virtual_address)
2541 shost_printk(KERN_WARNING, phba->shost,
2542 "No Virtual address\n");
2544 pasync_ctx->async_data.handle_base =
2545 mem_descr->mem_array[0].virtual_address;
2546 pasync_ctx->async_data.writables = 0;
2547 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2549 pasync_header_h =
2550 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2551 pasync_data_h =
2552 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2554 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2555 pasync_header_h->cri = -1;
2556 pasync_header_h->index = (char)index;
2557 INIT_LIST_HEAD(&pasync_header_h->link);
2558 pasync_header_h->pbuffer =
2559 (void *)((unsigned long)
2560 (pasync_ctx->async_header.va_base) +
2561 (p->defpdu_hdr_sz * index));
2563 pasync_header_h->pa.u.a64.address =
2564 pasync_ctx->async_header.pa_base.u.a64.address +
2565 (p->defpdu_hdr_sz * index);
2567 list_add_tail(&pasync_header_h->link,
2568 &pasync_ctx->async_header.free_list);
2569 pasync_header_h++;
2570 pasync_ctx->async_header.free_entries++;
2571 pasync_ctx->async_header.writables++;
2573 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2574 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2575 header_busy_list);
2576 pasync_data_h->cri = -1;
2577 pasync_data_h->index = (char)index;
2578 INIT_LIST_HEAD(&pasync_data_h->link);
2579 pasync_data_h->pbuffer =
2580 (void *)((unsigned long)
2581 (pasync_ctx->async_data.va_base) +
2582 (p->defpdu_data_sz * index));
2584 pasync_data_h->pa.u.a64.address =
2585 pasync_ctx->async_data.pa_base.u.a64.address +
2586 (p->defpdu_data_sz * index);
2588 list_add_tail(&pasync_data_h->link,
2589 &pasync_ctx->async_data.free_list);
2590 pasync_data_h++;
2591 pasync_ctx->async_data.free_entries++;
2592 pasync_ctx->async_data.writables++;
2594 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2597 pasync_ctx->async_header.host_write_ptr = 0;
2598 pasync_ctx->async_header.ep_read_ptr = -1;
2599 pasync_ctx->async_data.host_write_ptr = 0;
2600 pasync_ctx->async_data.ep_read_ptr = -1;
2603 static int
2604 be_sgl_create_contiguous(void *virtual_address,
2605 u64 physical_address, u32 length,
2606 struct be_dma_mem *sgl)
2608 WARN_ON(!virtual_address);
2609 WARN_ON(!physical_address);
2610 WARN_ON(!length); /* '!length > 0' binds as '(!length) > 0' */
2611 WARN_ON(!sgl);
2613 sgl->va = virtual_address;
2614 sgl->dma = (unsigned long)physical_address;
2615 sgl->size = length;
2617 return 0;
2620 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2622 memset(sgl, 0, sizeof(*sgl));
2625 static void
2626 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2627 struct mem_array *pmem, struct be_dma_mem *sgl)
2629 if (sgl->va)
2630 be_sgl_destroy_contiguous(sgl);
2632 be_sgl_create_contiguous(pmem->virtual_address,
2633 pmem->bus_address.u.a64.address,
2634 pmem->size, sgl);
2637 static void
2638 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2639 struct mem_array *pmem, struct be_dma_mem *sgl)
2641 if (sgl->va)
2642 be_sgl_destroy_contiguous(sgl);
2644 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2645 pmem->bus_address.u.a64.address,
2646 pmem->size, sgl);
2649 static int be_fill_queue(struct be_queue_info *q,
2650 u16 len, u16 entry_size, void *vaddress)
2652 struct be_dma_mem *mem = &q->dma_mem;
2654 memset(q, 0, sizeof(*q));
2655 q->len = len;
2656 q->entry_size = entry_size;
2657 mem->size = len * entry_size;
2658 mem->va = vaddress;
2659 if (!mem->va)
2660 return -ENOMEM;
2661 memset(mem->va, 0, mem->size);
2662 return 0;
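/*
 * Create one event queue per CPU, plus an extra EQ for the MCC when
 * MSI-X is enabled, each backed by freshly allocated DMA-coherent
 * pages; partial failures free whatever was set up.
 */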
2665 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2666 struct hwi_context_memory *phwi_context)
2668 unsigned int i, num_eq_pages;
2669 int ret, eq_for_mcc;
2670 struct be_queue_info *eq;
2671 struct be_dma_mem *mem;
2672 void *eq_vaddress;
2673 dma_addr_t paddr;
2675 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2676 sizeof(struct be_eq_entry));
2678 if (phba->msix_enabled)
2679 eq_for_mcc = 1;
2680 else
2681 eq_for_mcc = 0;
2682 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2683 eq = &phwi_context->be_eq[i].q;
2684 mem = &eq->dma_mem;
2685 phwi_context->be_eq[i].phba = phba;
2686 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2687 num_eq_pages * PAGE_SIZE,
2688 &paddr);
2689 if (!eq_vaddress)
2690 goto create_eq_error;
2692 mem->va = eq_vaddress;
2693 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2694 sizeof(struct be_eq_entry), eq_vaddress);
2695 if (ret) {
2696 shost_printk(KERN_ERR, phba->shost,
2697 "be_fill_queue Failed for EQ\n");
2698 goto create_eq_error;
2701 mem->dma = paddr;
2702 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2703 phwi_context->cur_eqd);
2704 if (ret) {
2705 shost_printk(KERN_ERR, phba->shost,
2706 "beiscsi_cmd_eq_create"
2707 "Failedfor EQ\n");
2708 goto create_eq_error;
2710 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2712 return 0;
2713 create_eq_error:
2714 for (i = 0; i < (phba->num_cpus + 1); i++) {
2715 eq = &phwi_context->be_eq[i].q;
2716 mem = &eq->dma_mem;
2717 if (mem->va)
2718 pci_free_consistent(phba->pcidev, num_eq_pages
2719 * PAGE_SIZE,
2720 mem->va, mem->dma);
2722 return ret;
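/*
 * Create one solicited-I/O completion queue per CPU and bind each CQ to
 * its event queue through the be_eq_obj.
 */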
2725 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2726 struct hwi_context_memory *phwi_context)
2728 unsigned int i, num_cq_pages;
2729 int ret;
2730 struct be_queue_info *cq, *eq;
2731 struct be_dma_mem *mem;
2732 struct be_eq_obj *pbe_eq;
2733 void *cq_vaddress;
2734 dma_addr_t paddr;
2736 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2737 sizeof(struct sol_cqe));
2739 for (i = 0; i < phba->num_cpus; i++) {
2740 cq = &phwi_context->be_cq[i];
2741 eq = &phwi_context->be_eq[i].q;
2742 pbe_eq = &phwi_context->be_eq[i];
2743 pbe_eq->cq = cq;
2744 pbe_eq->phba = phba;
2745 mem = &cq->dma_mem;
2746 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2747 num_cq_pages * PAGE_SIZE,
2748 &paddr);
2749 if (!cq_vaddress)
2750 goto create_cq_error;
2751 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2752 sizeof(struct sol_cqe), cq_vaddress);
2753 if (ret) {
2754 shost_printk(KERN_ERR, phba->shost,
2755 "be_fill_queue Failed for ISCSI CQ\n");
2756 goto create_cq_error;
2759 mem->dma = paddr;
2760 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2761 false, 0);
2762 if (ret) {
2763 shost_printk(KERN_ERR, phba->shost,
2764 "beiscsi_cmd_eq_create"
2765 "Failed for ISCSI CQ\n");
2766 goto create_cq_error;
2768 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2769 cq->id, eq->id);
2770 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2772 return 0;
2774 create_cq_error:
2775 for (i = 0; i < phba->num_cpus; i++) {
2776 cq = &phwi_context->be_cq[i];
2777 mem = &cq->dma_mem;
2778 if (mem->va)
2779 pci_free_consistent(phba->pcidev, num_cq_pages
2780 * PAGE_SIZE,
2781 mem->va, mem->dma);
2783 return ret;
2787 static int
2788 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2789 struct hwi_context_memory *phwi_context,
2790 struct hwi_controller *phwi_ctrlr,
2791 unsigned int def_pdu_ring_sz)
2793 unsigned int idx;
2794 int ret;
2795 struct be_queue_info *dq, *cq;
2796 struct be_dma_mem *mem;
2797 struct be_mem_descriptor *mem_descr;
2798 void *dq_vaddress;
2800 idx = 0;
2801 dq = &phwi_context->be_def_hdrq;
2802 cq = &phwi_context->be_cq[0];
2803 mem = &dq->dma_mem;
2804 mem_descr = phba->init_mem;
2805 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2806 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2807 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2808 sizeof(struct phys_addr),
2809 sizeof(struct phys_addr), dq_vaddress);
2810 if (ret) {
2811 shost_printk(KERN_ERR, phba->shost,
2812 "be_fill_queue Failed for DEF PDU HDR\n");
2813 return ret;
2815 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2816 bus_address.u.a64.address;
2817 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2818 def_pdu_ring_sz,
2819 phba->params.defpdu_hdr_sz);
2820 if (ret) {
2821 shost_printk(KERN_ERR, phba->shost,
2822 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2823 return ret;
2825 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2826 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2827 phwi_context->be_def_hdrq.id);
2828 hwi_post_async_buffers(phba, 1);
2829 return 0;
2832 static int
2833 beiscsi_create_def_data(struct beiscsi_hba *phba,
2834 struct hwi_context_memory *phwi_context,
2835 struct hwi_controller *phwi_ctrlr,
2836 unsigned int def_pdu_ring_sz)
2838 unsigned int idx;
2839 int ret;
2840 struct be_queue_info *dataq, *cq;
2841 struct be_dma_mem *mem;
2842 struct be_mem_descriptor *mem_descr;
2843 void *dq_vaddress;
2845 idx = 0;
2846 dataq = &phwi_context->be_def_dataq;
2847 cq = &phwi_context->be_cq[0];
2848 mem = &dataq->dma_mem;
2849 mem_descr = phba->init_mem;
2850 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2851 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2852 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2853 sizeof(struct phys_addr),
2854 sizeof(struct phys_addr), dq_vaddress);
2855 if (ret) {
2856 shost_printk(KERN_ERR, phba->shost,
2857 "be_fill_queue Failed for DEF PDU DATA\n");
2858 return ret;
2860 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2861 bus_address.u.a64.address;
2862 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2863 def_pdu_ring_sz,
2864 phba->params.defpdu_data_sz);
2865 if (ret) {
2866 shost_printk(KERN_ERR, phba->shost,
2867 "be_cmd_create_default_pdu_queue Failed"
2868 " for DEF PDU DATA\n");
2869 return ret;
2871 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2872 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2873 phwi_context->be_def_dataq.id);
2874 hwi_post_async_buffers(phba, 0);
2875 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2876 return 0;
2879 static int
2880 beiscsi_post_pages(struct beiscsi_hba *phba)
2882 struct be_mem_descriptor *mem_descr;
2883 struct mem_array *pm_arr;
2884 unsigned int page_offset, i;
2885 struct be_dma_mem sgl;
2886 int status;
2888 mem_descr = phba->init_mem;
2889 mem_descr += HWI_MEM_SGE;
2890 pm_arr = mem_descr->mem_array;
2892 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2893 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2894 for (i = 0; i < mem_descr->num_elements; i++) {
2895 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2896 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2897 page_offset,
2898 (pm_arr->size / PAGE_SIZE));
2899 page_offset += pm_arr->size / PAGE_SIZE;
2900 if (status != 0) {
2901 shost_printk(KERN_ERR, phba->shost,
2902 "post sgl failed.\n");
2903 return status;
2905 pm_arr++;
2907 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2908 return 0;
2911 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2913 struct be_dma_mem *mem = &q->dma_mem;
2914 if (mem->va)
2915 pci_free_consistent(phba->pcidev, mem->size,
2916 mem->va, mem->dma);
2919 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2920 u16 len, u16 entry_size)
2922 struct be_dma_mem *mem = &q->dma_mem;
2924 memset(q, 0, sizeof(*q));
2925 q->len = len;
2926 q->entry_size = entry_size;
2927 mem->size = len * entry_size;
2928 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2929 if (!mem->va)
2930 return -ENOMEM;
2931 memset(mem->va, 0, mem->size);
2932 return 0;
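/*
 * Slice HWI_MEM_WRB into per-connection WRB rings (crossing into the
 * next memory fragment when needed) and ask the firmware to create a
 * WRB queue over each slice.
 */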
2935 static int
2936 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2937 struct hwi_context_memory *phwi_context,
2938 struct hwi_controller *phwi_ctrlr)
2940 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2941 u64 pa_addr_lo;
2942 unsigned int idx, num, i;
2943 struct mem_array *pwrb_arr;
2944 void *wrb_vaddr;
2945 struct be_dma_mem sgl;
2946 struct be_mem_descriptor *mem_descr;
2947 int status;
2949 idx = 0;
2950 mem_descr = phba->init_mem;
2951 mem_descr += HWI_MEM_WRB;
2952 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2953 GFP_KERNEL);
2954 if (!pwrb_arr) {
2955 shost_printk(KERN_ERR, phba->shost,
2956 "Memory alloc failed in create wrb ring.\n");
2957 return -ENOMEM;
2959 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2960 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2961 num_wrb_rings = mem_descr->mem_array[idx].size /
2962 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2964 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2965 if (num_wrb_rings) {
2966 pwrb_arr[num].virtual_address = wrb_vaddr;
2967 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2968 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2969 sizeof(struct iscsi_wrb);
2970 wrb_vaddr += pwrb_arr[num].size;
2971 pa_addr_lo += pwrb_arr[num].size;
2972 num_wrb_rings--;
2973 } else {
2974 idx++;
2975 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2976 pa_addr_lo = mem_descr->mem_array[idx].
2977 bus_address.u.a64.address;
2978 num_wrb_rings = mem_descr->mem_array[idx].size /
2979 (phba->params.wrbs_per_cxn *
2980 sizeof(struct iscsi_wrb));
2981 pwrb_arr[num].virtual_address = wrb_vaddr;
2982 pwrb_arr[num].bus_address.u.a64.address =
2983 pa_addr_lo;
2984 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2985 sizeof(struct iscsi_wrb);
2986 wrb_vaddr += pwrb_arr[num].size;
2987 pa_addr_lo += pwrb_arr[num].size;
2988 num_wrb_rings--;
2991 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2992 wrb_mem_index = 0;
2993 offset = 0;
2994 size = 0;
2996 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2997 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2998 &phwi_context->be_wrbq[i]);
2999 if (status != 0) {
3000 shost_printk(KERN_ERR, phba->shost,
3001 "wrbq create failed.");
3002 kfree(pwrb_arr);
3003 return status;
3005 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].id;
3008 kfree(pwrb_arr);
3009 return 0;
3012 static void free_wrb_handles(struct beiscsi_hba *phba)
3014 unsigned int index;
3015 struct hwi_controller *phwi_ctrlr;
3016 struct hwi_wrb_context *pwrb_context;
3018 phwi_ctrlr = phba->phwi_ctrlr;
3019 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3020 pwrb_context = &phwi_ctrlr->wrb_context[index];
3021 kfree(pwrb_context->pwrb_handle_base);
3022 kfree(pwrb_context->pwrb_handle_basestd);
3026 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3028 struct be_queue_info *q;
3029 struct be_ctrl_info *ctrl = &phba->ctrl;
3031 q = &phba->ctrl.mcc_obj.q;
3032 if (q->created)
3033 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3034 be_queue_free(phba, q);
3036 q = &phba->ctrl.mcc_obj.cq;
3037 if (q->created)
3038 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3039 be_queue_free(phba, q);
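/*
 * Destroy every firmware queue this driver created: WRB queues, the
 * default PDU header/data queues, the posted SGL pages, per-CPU CQs and
 * EQs, and finally the MCC queue pair.
 */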
3042 static void hwi_cleanup(struct beiscsi_hba *phba)
3044 struct be_queue_info *q;
3045 struct be_ctrl_info *ctrl = &phba->ctrl;
3046 struct hwi_controller *phwi_ctrlr;
3047 struct hwi_context_memory *phwi_context;
3048 int i, eq_num;
3050 phwi_ctrlr = phba->phwi_ctrlr;
3051 phwi_context = phwi_ctrlr->phwi_ctxt;
3052 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3053 q = &phwi_context->be_wrbq[i];
3054 if (q->created)
3055 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3057 free_wrb_handles(phba);
3059 q = &phwi_context->be_def_hdrq;
3060 if (q->created)
3061 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3063 q = &phwi_context->be_def_dataq;
3064 if (q->created)
3065 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3067 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3069 for (i = 0; i < (phba->num_cpus); i++) {
3070 q = &phwi_context->be_cq[i];
3071 if (q->created)
3072 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3074 if (phba->msix_enabled)
3075 eq_num = 1;
3076 else
3077 eq_num = 0;
3078 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3079 q = &phwi_context->be_eq[i].q;
3080 if (q->created)
3081 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3083 be_mcc_queues_destroy(phba);
3086 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3087 struct hwi_context_memory *phwi_context)
3089 struct be_queue_info *q, *cq;
3090 struct be_ctrl_info *ctrl = &phba->ctrl;
3092 /* Alloc MCC compl queue */
3093 cq = &phba->ctrl.mcc_obj.cq;
3094 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3095 sizeof(struct be_mcc_compl)))
3096 goto err;
3097 /* Ask BE to create MCC compl queue; */
3098 if (phba->msix_enabled) {
3099 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3100 [phba->num_cpus].q, false, true, 0))
3101 goto mcc_cq_free;
3102 } else {
3103 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3104 false, true, 0))
3105 goto mcc_cq_free;
3108 /* Alloc MCC queue */
3109 q = &phba->ctrl.mcc_obj.q;
3110 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3111 goto mcc_cq_destroy;
3113 /* Ask BE to create MCC queue */
3114 if (beiscsi_cmd_mccq_create(phba, q, cq))
3115 goto mcc_q_free;
3117 return 0;
3119 mcc_q_free:
3120 be_queue_free(phba, q);
3121 mcc_cq_destroy:
3122 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3123 mcc_cq_free:
3124 be_queue_free(phba, cq);
3125 err:
3126 return -ENOMEM;
3129 static int find_num_cpus(void)
3131 int num_cpus = 0;
3133 num_cpus = num_online_cpus();
3134 if (num_cpus >= MAX_CPUS)
3135 num_cpus = MAX_CPUS - 1;
3137 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
3138 return num_cpus;
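/*
 * Bring the port up in order: firmware init, EQs, MCC queues, firmware
 * version check, CQs, both default PDU rings, SGL page posting and the
 * WRB rings. Any failure funnels through hwi_cleanup().
 */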
3141 static int hwi_init_port(struct beiscsi_hba *phba)
3143 struct hwi_controller *phwi_ctrlr;
3144 struct hwi_context_memory *phwi_context;
3145 unsigned int def_pdu_ring_sz;
3146 struct be_ctrl_info *ctrl = &phba->ctrl;
3147 int status;
3149 def_pdu_ring_sz =
3150 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3151 phwi_ctrlr = phba->phwi_ctrlr;
3152 phwi_context = phwi_ctrlr->phwi_ctxt;
3153 phwi_context->max_eqd = 0;
3154 phwi_context->min_eqd = 0;
3155 phwi_context->cur_eqd = 64;
3156 be_cmd_fw_initialize(&phba->ctrl);
3158 status = beiscsi_create_eqs(phba, phwi_context);
3159 if (status != 0) {
3160 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
3161 goto error;
3164 status = be_mcc_queues_create(phba, phwi_context);
3165 if (status != 0)
3166 goto error;
3168 status = mgmt_check_supported_fw(ctrl, phba);
3169 if (status != 0) {
3170 shost_printk(KERN_ERR, phba->shost,
3171 "Unsupported fw version\n");
3172 goto error;
3175 status = beiscsi_create_cqs(phba, phwi_context);
3176 if (status != 0) {
3177 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
3178 goto error;
3181 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3182 def_pdu_ring_sz);
3183 if (status != 0) {
3184 shost_printk(KERN_ERR, phba->shost,
3185 "Default Header not created\n");
3186 goto error;
3189 status = beiscsi_create_def_data(phba, phwi_context,
3190 phwi_ctrlr, def_pdu_ring_sz);
3191 if (status != 0) {
3192 shost_printk(KERN_ERR, phba->shost,
3193 "Default Data not created\n");
3194 goto error;
3197 status = beiscsi_post_pages(phba);
3198 if (status != 0) {
3199 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
3200 goto error;
3203 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3204 if (status != 0) {
3205 shost_printk(KERN_ERR, phba->shost,
3206 "WRB Rings not created\n");
3207 goto error;
3210 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
3211 return 0;
3213 error:
3214 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
3215 hwi_cleanup(phba);
3216 return -ENOMEM;
3219 static int hwi_init_controller(struct beiscsi_hba *phba)
3221 struct hwi_controller *phwi_ctrlr;
3223 phwi_ctrlr = phba->phwi_ctrlr;
3224 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3225 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3226 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3227 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3228 phwi_ctrlr->phwi_ctxt);
3229 } else {
3230 shost_printk(KERN_ERR, phba->shost,
3231 "HWI_MEM_ADDN_CONTEXT is more than one element."
3232 "Failing to load\n");
3233 return -ENOMEM;
3236 iscsi_init_global_templates(phba);
3237 beiscsi_init_wrb_handle(phba);
3238 hwi_init_async_pdu_ctx(phba);
3239 if (hwi_init_port(phba) != 0) {
3240 shost_printk(KERN_ERR, phba->shost,
3241 "hwi_init_controller failed\n");
3242 return -ENOMEM;
3244 return 0;
3247 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3249 struct be_mem_descriptor *mem_descr;
3250 int i, j;
3252 mem_descr = phba->init_mem;
3253 i = 0;
3254 j = 0;
3255 for (i = 0; i < SE_MEM_MAX; i++) {
3256 for (j = mem_descr->num_elements; j > 0; j--) {
3257 pci_free_consistent(phba->pcidev,
3258 mem_descr->mem_array[j - 1].size,
3259 mem_descr->mem_array[j - 1].virtual_address,
3260 (unsigned long)mem_descr->mem_array[j - 1].
3261 bus_address.u.a64.address);
3263 kfree(mem_descr->mem_array);
3264 mem_descr++;
3266 kfree(phba->init_mem);
3267 kfree(phba->phwi_ctrlr);
3270 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3272 int ret = -ENOMEM;
3274 ret = beiscsi_get_memory(phba);
3275 if (ret < 0) {
3276 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3277 "Failed in beiscsi_get_memory\n");
3278 return ret;
3281 ret = hwi_init_controller(phba);
3282 if (ret)
3283 goto free_init;
3284 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
3285 return 0;
3287 free_init:
3288 beiscsi_free_mem(phba);
3289 return -ENOMEM;
3292 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3294 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3295 struct sgl_handle *psgl_handle;
3296 struct iscsi_sge *pfrag;
3297 unsigned int arr_index, i, idx;
3299 phba->io_sgl_hndl_avbl = 0;
3300 phba->eh_sgl_hndl_avbl = 0;
3302 mem_descr_sglh = phba->init_mem;
3303 mem_descr_sglh += HWI_MEM_SGLH;
3304 if (1 == mem_descr_sglh->num_elements) {
3305 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3306 phba->params.ios_per_ctrl,
3307 GFP_KERNEL);
3308 if (!phba->io_sgl_hndl_base) {
3309 shost_printk(KERN_ERR, phba->shost,
3310 "Mem Alloc Failed. Failing to load\n");
3311 return -ENOMEM;
3313 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3314 (phba->params.icds_per_ctrl -
3315 phba->params.ios_per_ctrl),
3316 GFP_KERNEL);
3317 if (!phba->eh_sgl_hndl_base) {
3318 kfree(phba->io_sgl_hndl_base);
3319 shost_printk(KERN_ERR, phba->shost,
3320 "Mem Alloc Failed. Failing to load\n");
3321 return -ENOMEM;
3323 } else {
3324 shost_printk(KERN_ERR, phba->shost,
3325 "HWI_MEM_SGLH is more than one element."
3326 "Failing to load\n");
3327 return -ENOMEM;
3330 arr_index = 0;
3331 idx = 0;
3332 while (idx < mem_descr_sglh->num_elements) {
3333 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3335 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3336 sizeof(struct sgl_handle)); i++) {
3337 if (arr_index < phba->params.ios_per_ctrl) {
3338 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3339 phba->io_sgl_hndl_avbl++;
3340 arr_index++;
3341 } else {
3342 phba->eh_sgl_hndl_base[arr_index -
3343 phba->params.ios_per_ctrl] =
3344 psgl_handle;
3345 arr_index++;
3346 phba->eh_sgl_hndl_avbl++;
3348 psgl_handle++;
3350 idx++;
3352 SE_DEBUG(DBG_LVL_8,
3353 "phba->io_sgl_hndl_avbl=%d"
3354 "phba->eh_sgl_hndl_avbl=%d\n",
3355 phba->io_sgl_hndl_avbl,
3356 phba->eh_sgl_hndl_avbl);
3357 mem_descr_sg = phba->init_mem;
3358 mem_descr_sg += HWI_MEM_SGE;
3359 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3360 mem_descr_sg->num_elements);
3361 arr_index = 0;
3362 idx = 0;
3363 while (idx < mem_descr_sg->num_elements) {
3364 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3366 for (i = 0;
3367 i < (mem_descr_sg->mem_array[idx].size) /
3368 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3369 i++) {
3370 if (arr_index < phba->params.ios_per_ctrl)
3371 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3372 else
3373 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3374 phba->params.ios_per_ctrl];
3375 psgl_handle->pfrag = pfrag;
3376 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3377 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3378 pfrag += phba->params.num_sge_per_io;
3379 psgl_handle->sgl_index =
3380 phba->fw_config.iscsi_icd_start + arr_index++;
3382 idx++;
3384 phba->io_sgl_free_index = 0;
3385 phba->io_sgl_alloc_index = 0;
3386 phba->eh_sgl_free_index = 0;
3387 phba->eh_sgl_alloc_index = 0;
3388 return 0;
3391 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3393 int i, new_cid;
3395 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3396 GFP_KERNEL);
3397 if (!phba->cid_array) {
3398 shost_printk(KERN_ERR, phba->shost,
3399 "Failed to allocate memory in "
3400 "hba_setup_cid_tbls\n");
3401 return -ENOMEM;
3403 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3404 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3405 if (!phba->ep_array) {
3406 shost_printk(KERN_ERR, phba->shost,
3407 "Failed to allocate memory in "
3408 "hba_setup_cid_tbls\n");
3409 kfree(phba->cid_array);
3410 return -ENOMEM;
3412 new_cid = phba->fw_config.iscsi_cid_start;
3413 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3414 phba->cid_array[i] = new_cid;
3415 new_cid += 2;
3417 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3418 return 0;
3421 static void hwi_enable_intr(struct beiscsi_hba *phba)
3423 struct be_ctrl_info *ctrl = &phba->ctrl;
3424 struct hwi_controller *phwi_ctrlr;
3425 struct hwi_context_memory *phwi_context;
3426 struct be_queue_info *eq;
3427 u8 __iomem *addr;
3428 u32 reg, i;
3429 u32 enabled;
3431 phwi_ctrlr = phba->phwi_ctrlr;
3432 phwi_context = phwi_ctrlr->phwi_ctxt;
3434 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3435 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3436 reg = ioread32(addr);
3438 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3439 if (!enabled) {
3440 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3441 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3442 iowrite32(reg, addr);
3445 if (!phba->msix_enabled) {
3446 eq = &phwi_context->be_eq[0].q;
3447 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3448 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3449 } else {
3450 for (i = 0; i <= phba->num_cpus; i++) {
3451 eq = &phwi_context->be_eq[i].q;
3452 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3453 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3458 static void hwi_disable_intr(struct beiscsi_hba *phba)
3460 struct be_ctrl_info *ctrl = &phba->ctrl;
3462 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3463 u32 reg = ioread32(addr);
3465 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3466 if (enabled) {
3467 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3468 iowrite32(reg, addr);
3469 } else
3470 shost_printk(KERN_WARNING, phba->shost,
3471 "In hwi_disable_intr, Already Disabled\n");
3474 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3476 struct be_cmd_resp_get_boot_target *boot_resp;
3477 struct be_cmd_resp_get_session *session_resp;
3478 struct be_mcc_wrb *wrb;
3479 struct be_dma_mem nonemb_cmd;
3480 unsigned int tag, wrb_num;
3481 unsigned short status, extd_status;
3482 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3483 int ret = -ENOMEM;
3485 tag = beiscsi_get_boot_target(phba);
3486 if (!tag) {
3487 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed\n");
3488 return -EAGAIN;
3489 } else
3490 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3491 phba->ctrl.mcc_numtag[tag]);
3493 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3494 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3495 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3496 if (status || extd_status) {
3497 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed"
3498 " status = %d extd_status = %d\n",
3499 status, extd_status);
3500 free_mcc_tag(&phba->ctrl, tag);
3501 return -EBUSY;
3503 wrb = queue_get_wrb(mccq, wrb_num);
3504 free_mcc_tag(&phba->ctrl, tag);
3505 boot_resp = embedded_payload(wrb);
3507 if (boot_resp->boot_session_handle < 0) {
3508 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3509 return -ENXIO;
3512 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3513 sizeof(*session_resp),
3514 &nonemb_cmd.dma);
3515 if (nonemb_cmd.va == NULL) {
3516 SE_DEBUG(DBG_LVL_1,
3517 "Failed to allocate memory for"
3518 "beiscsi_get_session_info\n");
3519 return -ENOMEM;
3522 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3523 tag = beiscsi_get_session_info(phba,
3524 boot_resp->boot_session_handle, &nonemb_cmd);
3525 if (!tag) {
3526 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3527 " Failed\n");
3528 goto boot_freemem;
3529 } else
3530 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3531 phba->ctrl.mcc_numtag[tag]);
3533 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3534 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3535 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3536 if (status || extd_status) {
3537 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3538 " status = %d extd_status = %d\n",
3539 status, extd_status);
3540 free_mcc_tag(&phba->ctrl, tag);
3541 goto boot_freemem;
3543 wrb = queue_get_wrb(mccq, wrb_num);
3544 free_mcc_tag(&phba->ctrl, tag);
3545 session_resp = nonemb_cmd.va ;
3547 memcpy(&phba->boot_sess, &session_resp->session_info,
3548 sizeof(struct mgmt_session_info));
3549 ret = 0;
3551 boot_freemem:
3552 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3553 nonemb_cmd.va, nonemb_cmd.dma);
3554 return ret;
3557 static void beiscsi_boot_release(void *data)
3559 struct beiscsi_hba *phba = data;
3561 scsi_host_put(phba->shost);
3564 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3566 struct iscsi_boot_kobj *boot_kobj;
3568 /* get boot info using mgmt cmd */
3569 if (beiscsi_get_boot_info(phba))
3570 /* Try to see if we can carry on without this */
3571 return 0;
3573 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3574 if (!phba->boot_kset)
3575 return -ENOMEM;
3577 /* get a ref because the show function will ref the phba */
3578 if (!scsi_host_get(phba->shost))
3579 goto free_kset;
3580 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3581 beiscsi_show_boot_tgt_info,
3582 beiscsi_tgt_get_attr_visibility,
3583 beiscsi_boot_release);
3584 if (!boot_kobj)
3585 goto put_shost;
3587 if (!scsi_host_get(phba->shost))
3588 goto free_kset;
3589 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3590 beiscsi_show_boot_ini_info,
3591 beiscsi_ini_get_attr_visibility,
3592 beiscsi_boot_release);
3593 if (!boot_kobj)
3594 goto put_shost;
3596 if (!scsi_host_get(phba->shost))
3597 goto free_kset;
3598 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3599 beiscsi_show_boot_eth_info,
3600 beiscsi_eth_get_attr_visibility,
3601 beiscsi_boot_release);
3602 if (!boot_kobj)
3603 goto put_shost;
3604 return 0;
3606 put_shost:
3607 scsi_host_put(phba->shost);
3608 free_kset:
3609 iscsi_boot_destroy_kset(phba->boot_kset);
3610 return -ENOMEM;
3613 static int beiscsi_init_port(struct beiscsi_hba *phba)
3615 int ret;
3617 ret = beiscsi_init_controller(phba);
3618 if (ret < 0) {
3619 shost_printk(KERN_ERR, phba->shost,
3620 "beiscsi_dev_probe - Failed in"
3621 "beiscsi_init_controller\n");
3622 return ret;
3624 ret = beiscsi_init_sgl_handle(phba);
3625 if (ret < 0) {
3626 shost_printk(KERN_ERR, phba->shost,
3627 "beiscsi_dev_probe - Failed in"
3628 "beiscsi_init_sgl_handle\n");
3629 goto do_cleanup_ctrlr;
3632 if (hba_setup_cid_tbls(phba)) {
3633 shost_printk(KERN_ERR, phba->shost,
3634 "Failed in hba_setup_cid_tbls\n");
3635 kfree(phba->io_sgl_hndl_base);
3636 kfree(phba->eh_sgl_hndl_base);
3637 goto do_cleanup_ctrlr;
3640 return ret;
3642 do_cleanup_ctrlr:
3643 hwi_cleanup(phba);
3644 return ret;
3647 static void hwi_purge_eq(struct beiscsi_hba *phba)
3649 struct hwi_controller *phwi_ctrlr;
3650 struct hwi_context_memory *phwi_context;
3651 struct be_queue_info *eq;
3652 struct be_eq_entry *eqe = NULL;
3653 int i, eq_msix;
3654 unsigned int num_processed;
3656 phwi_ctrlr = phba->phwi_ctrlr;
3657 phwi_context = phwi_ctrlr->phwi_ctxt;
3658 if (phba->msix_enabled)
3659 eq_msix = 1;
3660 else
3661 eq_msix = 0;
3663 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3664 eq = &phwi_context->be_eq[i].q;
3665 eqe = queue_tail_node(eq);
3666 num_processed = 0;
3667 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3668 & EQE_VALID_MASK) {
3669 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3670 queue_tail_inc(eq);
3671 eqe = queue_tail_node(eq);
3672 num_processed++;
3675 if (num_processed)
3676 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3680 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3682 int mgmt_status;
3684 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3685 if (mgmt_status)
3686 shost_printk(KERN_WARNING, phba->shost,
3687 "mgmt_epfw_cleanup FAILED\n");
3689 hwi_purge_eq(phba);
3690 hwi_cleanup(phba);
3691 kfree(phba->io_sgl_hndl_base);
3692 kfree(phba->eh_sgl_hndl_base);
3693 kfree(phba->cid_array);
3694 kfree(phba->ep_array);
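/*
 * Post an iscsi_target_context_update_wrb carrying the negotiated login
 * parameters (burst lengths, ERL, digest/R2T/immediate-data flags,
 * ExpStatSN) so the hardware takes over the connection.
 */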
3697 void
3698 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3699 struct beiscsi_offload_params *params)
3701 struct wrb_handle *pwrb_handle;
3702 struct iscsi_target_context_update_wrb *pwrb = NULL;
3703 struct be_mem_descriptor *mem_descr;
3704 struct beiscsi_hba *phba = beiscsi_conn->phba;
3705 u32 doorbell = 0;
3708 /* We can always use 0 here because it is reserved by libiscsi for
3709 * login/startup related tasks. */
3711 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3712 phba->fw_config.iscsi_cid_start));
3713 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3714 memset(pwrb, 0, sizeof(*pwrb));
3715 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3716 max_burst_length, pwrb, params->dw[offsetof
3717 (struct amap_beiscsi_offload_params,
3718 max_burst_length) / 32]);
3719 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3720 max_send_data_segment_length, pwrb,
3721 params->dw[offsetof(struct amap_beiscsi_offload_params,
3722 max_send_data_segment_length) / 32]);
3723 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3724 first_burst_length,
3725 pwrb,
3726 params->dw[offsetof(struct amap_beiscsi_offload_params,
3727 first_burst_length) / 32]);
3729 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3730 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3731 erl) / 32] & OFFLD_PARAMS_ERL));
3732 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3733 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3734 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3735 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3736 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3737 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3738 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3739 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3740 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3741 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3742 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3743 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3744 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3745 pwrb,
3746 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3747 exp_statsn) / 32] + 1));
3748 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3749 0x7);
3750 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3751 pwrb, pwrb_handle->wrb_index);
3752 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3753 pwrb, pwrb_handle->nxt_wrb_index);
3754 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3755 session_state, pwrb, 0);
3756 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3757 pwrb, 1);
3758 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3759 pwrb, 0);
3760 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3761 0);
3763 mem_descr = phba->init_mem;
3764 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3766 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3767 pad_buffer_addr_hi, pwrb,
3768 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3769 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3770 pad_buffer_addr_lo, pwrb,
3771 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3773 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
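/*
 * Doorbell layout, as used by every transmit path in this file: the
 * connection CID in the low bits, the WRB index in the DEF_PDU WRB
 * index field, and a number-posted count of one.
 */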
3775 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3776 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3777 << DB_DEF_PDU_WRB_INDEX_SHIFT;
3778 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3780 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3781 }
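/**
 * beiscsi_parse_pdu - recover the libiscsi task index from an itt
 * @conn: iSCSI connection the pdu belongs to
 * @itt: itt from the pdu; this is already the libiscsi value, since
 *       beiscsi_alloc_pdu() saves it as io_task->libiscsi_itt for the
 *       completion path, so a plain cast suffices here
 * @index: out parameter receiving the task index
 * @age: optional out parameter receiving the session age
 */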
3783 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3784 int *index, int *age)
3785 {
3786 *index = (int)itt;
3787 if (age)
3788 *age = conn->session->age;
3789 }
3791 /**
3792  * beiscsi_alloc_pdu - allocates pdu and related resources
3793  * @task: libiscsi task
3794  * @opcode: opcode of pdu for task
3795  *
3796  * This is called with the session lock held. It will allocate
3797  * the wrb and sgl if needed for the command, and it will prep
3798  * the pdu's itt. beiscsi_parse_pdu will later translate
3799  * the pdu itt to the libiscsi task itt.
3800  */
3801 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3802 {
3803 struct beiscsi_io_task *io_task = task->dd_data;
3804 struct iscsi_conn *conn = task->conn;
3805 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3806 struct beiscsi_hba *phba = beiscsi_conn->phba;
3807 struct hwi_wrb_context *pwrb_context;
3808 struct hwi_controller *phwi_ctrlr;
3809 itt_t itt;
3810 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3811 dma_addr_t paddr;
3813 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3814 GFP_ATOMIC, &paddr);
3815 if (!io_task->cmd_bhs)
3816 return -ENOMEM;
3817 io_task->bhs_pa.u.a64.address = paddr;
3818 io_task->libiscsi_itt = (itt_t)task->itt;
3819 io_task->conn = beiscsi_conn;
3821 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3822 task->hdr_max = sizeof(struct be_cmd_bhs);
3823 io_task->psgl_handle = NULL;
3824 io_task->pwrb_handle = NULL;
3826 if (task->sc) {
3827 spin_lock(&phba->io_sgl_lock);
3828 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3829 spin_unlock(&phba->io_sgl_lock);
3830 if (!io_task->psgl_handle)
3831 goto free_hndls;
3832 io_task->pwrb_handle = alloc_wrb_handle(phba,
3833 beiscsi_conn->beiscsi_conn_cid -
3834 phba->fw_config.iscsi_cid_start);
3835 if (!io_task->pwrb_handle)
3836 goto free_io_hndls;
3837 } else {
3838 io_task->scsi_cmnd = NULL;
3839 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3840 if (!beiscsi_conn->login_in_progress) {
3841 spin_lock(&phba->mgmt_sgl_lock);
3842 io_task->psgl_handle = (struct sgl_handle *)
3843 alloc_mgmt_sgl_handle(phba);
3844 spin_unlock(&phba->mgmt_sgl_lock);
3845 if (!io_task->psgl_handle)
3846 goto free_hndls;
3848 beiscsi_conn->login_in_progress = 1;
3849 beiscsi_conn->plogin_sgl_handle =
3850 io_task->psgl_handle;
3851 io_task->pwrb_handle =
3852 alloc_wrb_handle(phba,
3853 beiscsi_conn->beiscsi_conn_cid -
3854 phba->fw_config.iscsi_cid_start);
3855 if (!io_task->pwrb_handle)
3856 goto free_io_hndls;
3857 beiscsi_conn->plogin_wrb_handle =
3858 io_task->pwrb_handle;
3860 } else {
3861 io_task->psgl_handle =
3862 beiscsi_conn->plogin_sgl_handle;
3863 io_task->pwrb_handle =
3864 beiscsi_conn->plogin_wrb_handle;
3865 }
3866 } else {
3867 spin_lock(&phba->mgmt_sgl_lock);
3868 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3869 spin_unlock(&phba->mgmt_sgl_lock);
3870 if (!io_task->psgl_handle)
3871 goto free_hndls;
3872 io_task->pwrb_handle =
3873 alloc_wrb_handle(phba,
3874 beiscsi_conn->beiscsi_conn_cid -
3875 phba->fw_config.iscsi_cid_start);
3876 if (!io_task->pwrb_handle)
3877 goto free_mgmt_hndls;
3878 }
3879 }
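/*
 * Pack the hardware itt: WRB index in the upper 16 bits, SGL (ICD)
 * index in the lower 16, stored big-endian.  For illustration,
 * wrb_index 5 with sgl_index 10 yields cpu_to_be32(0x0005000a).
 */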
3881 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3882 wrb_index << 16) | (unsigned int)
3883 (io_task->psgl_handle->sgl_index));
3884 io_task->pwrb_handle->pio_handle = task;
3886 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3887 return 0;
3889 free_io_hndls:
3890 spin_lock(&phba->io_sgl_lock);
3891 free_io_sgl_handle(phba, io_task->psgl_handle);
3892 spin_unlock(&phba->io_sgl_lock);
3893 goto free_hndls;
3894 free_mgmt_hndls:
3895 spin_lock(&phba->mgmt_sgl_lock);
3896 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3897 spin_unlock(&phba->mgmt_sgl_lock);
3898 free_hndls:
3899 phwi_ctrlr = phba->phwi_ctrlr;
3900 pwrb_context = &phwi_ctrlr->wrb_context[
3901 beiscsi_conn->beiscsi_conn_cid -
3902 phba->fw_config.iscsi_cid_start];
3903 if (io_task->pwrb_handle)
3904 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3905 io_task->pwrb_handle = NULL;
3906 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3907 io_task->bhs_pa.u.a64.address);
3908 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3909 return -ENOMEM;
3910 }
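/**
 * beiscsi_cleanup_task - release the per-task resources
 * @task: libiscsi task being completed or torn down
 *
 * Returns the task's WRB handle and BHS buffer to their pools, then
 * frees the I/O or mgmt SGL handle taken in beiscsi_alloc_pdu().
 * Login tasks return early and keep their SGL handle, which is shared
 * for the duration of the login exchange.
 */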
3912 static void beiscsi_cleanup_task(struct iscsi_task *task)
3913 {
3914 struct beiscsi_io_task *io_task = task->dd_data;
3915 struct iscsi_conn *conn = task->conn;
3916 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3917 struct beiscsi_hba *phba = beiscsi_conn->phba;
3918 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3919 struct hwi_wrb_context *pwrb_context;
3920 struct hwi_controller *phwi_ctrlr;
3922 phwi_ctrlr = phba->phwi_ctrlr;
3923 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3924 - phba->fw_config.iscsi_cid_start];
3925 if (io_task->pwrb_handle) {
3926 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3927 io_task->pwrb_handle = NULL;
3928 }
3930 if (io_task->cmd_bhs) {
3931 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3932 io_task->bhs_pa.u.a64.address);
3933 }
3935 if (task->sc) {
3936 if (io_task->psgl_handle) {
3937 spin_lock(&phba->io_sgl_lock);
3938 free_io_sgl_handle(phba, io_task->psgl_handle);
3939 spin_unlock(&phba->io_sgl_lock);
3940 io_task->psgl_handle = NULL;
3941 }
3942 } else {
3943 if (task->hdr &&
3944 ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
3945 return;
3946 if (io_task->psgl_handle) {
3947 spin_lock(&phba->mgmt_sgl_lock);
3948 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3949 spin_unlock(&phba->mgmt_sgl_lock);
3950 io_task->psgl_handle = NULL;
3951 }
3952 }
3953 }
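/**
 * beiscsi_iotask - post a SCSI command WRB to the adapter
 * @task: libiscsi task carrying the command
 * @sg: DMA-mapped scatterlist for the data buffer
 * @num_sg: number of mapped scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for DMA_TO_DEVICE (write) commands
 *
 * Fills in the WRB (command type, LUN, transfer length, CmdSN, itt
 * and the SGL), fixes up the endianness and rings the TXULP0
 * doorbell.  Writes also get a DATA_OUT header prepared up front.
 */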
3955 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3956 unsigned int num_sg, unsigned int xferlen,
3957 unsigned int writedir)
3958 {
3960 struct beiscsi_io_task *io_task = task->dd_data;
3961 struct iscsi_conn *conn = task->conn;
3962 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3963 struct beiscsi_hba *phba = beiscsi_conn->phba;
3964 struct iscsi_wrb *pwrb = NULL;
3965 unsigned int doorbell = 0;
3967 pwrb = io_task->pwrb_handle->pwrb;
3968 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3969 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3971 if (writedir) {
3972 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3973 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3974 &io_task->cmd_bhs->iscsi_data_pdu,
3975 (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3976 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3977 &io_task->cmd_bhs->iscsi_data_pdu,
3978 ISCSI_OPCODE_SCSI_DATA_OUT);
3979 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3980 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3981 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3982 INI_WR_CMD);
3983 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3984 } else {
3985 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3986 INI_RD_CMD);
3987 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3988 }
3989 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3990 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3991 &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3993 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3994 cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
3995 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3996 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3997 io_task->pwrb_handle->wrb_index);
3998 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3999 be32_to_cpu(task->cmdsn));
4000 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4001 io_task->psgl_handle->sgl_index);
4003 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4005 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4006 io_task->pwrb_handle->nxt_wrb_index);
4007 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4009 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4010 doorbell |= (io_task->pwrb_handle->wrb_index &
4011 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4012 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4014 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4015 return 0;
4016 }
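/**
 * beiscsi_mtask - post an iSCSI control pdu to the adapter
 * @task: libiscsi task for a login, nop-out, text, TMF or logout pdu
 *
 * Picks the WRB type from the pdu opcode (a nop-out that answers a
 * target nop-in, i.e. one with a real ttt, goes out as a dummy
 * message), copies the pdu payload with hwi_write_buffer() and posts
 * the WRB through the TXULP0 doorbell.  Unsupported opcodes return
 * -EINVAL.
 */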
4018 static int beiscsi_mtask(struct iscsi_task *task)
4019 {
4020 struct beiscsi_io_task *io_task = task->dd_data;
4021 struct iscsi_conn *conn = task->conn;
4022 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4023 struct beiscsi_hba *phba = beiscsi_conn->phba;
4024 struct iscsi_wrb *pwrb = NULL;
4025 unsigned int doorbell = 0;
4026 unsigned int cid;
4028 cid = beiscsi_conn->beiscsi_conn_cid;
4029 pwrb = io_task->pwrb_handle->pwrb;
4030 memset(pwrb, 0, sizeof(*pwrb));
4031 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4032 be32_to_cpu(task->cmdsn));
4033 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4034 io_task->pwrb_handle->wrb_index);
4035 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4036 io_task->psgl_handle->sgl_index);
4038 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4039 case ISCSI_OP_LOGIN:
4040 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4041 TGT_DM_CMD);
4042 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4043 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4044 hwi_write_buffer(pwrb, task);
4045 break;
4046 case ISCSI_OP_NOOP_OUT:
4047 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4048 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4049 TGT_DM_CMD);
4050 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4051 pwrb, 0);
4052 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4053 } else {
4054 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4055 INI_RD_CMD);
4056 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4057 }
4058 hwi_write_buffer(pwrb, task);
4059 break;
4060 case ISCSI_OP_TEXT:
4061 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4062 TGT_DM_CMD);
4063 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4064 hwi_write_buffer(pwrb, task);
4065 break;
4066 case ISCSI_OP_SCSI_TMFUNC:
4067 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4068 INI_TMF_CMD);
4069 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4070 hwi_write_buffer(pwrb, task);
4071 break;
4072 case ISCSI_OP_LOGOUT:
4073 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4074 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4075 HWH_TYPE_LOGOUT);
4076 hwi_write_buffer(pwrb, task);
4077 break;
4079 default:
4080 SE_DEBUG(DBG_LVL_1, "opcode %d is not supported\n",
4081 task->hdr->opcode & ISCSI_OPCODE_MASK);
4082 return -EINVAL;
4083 }
4085 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4086 task->data_count);
4087 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4088 io_task->pwrb_handle->nxt_wrb_index);
4089 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4091 doorbell |= cid & DB_WRB_POST_CID_MASK;
4092 doorbell |= (io_task->pwrb_handle->wrb_index &
4093 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4094 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4095 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4096 return 0;
4097 }
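/**
 * beiscsi_task_xmit - libiscsi xmit_task entry point
 * @task: task to transmit
 *
 * Control pdus (no scsi_cmnd attached) go straight to
 * beiscsi_mtask(); SCSI commands are DMA-mapped here and handed to
 * beiscsi_iotask() along with their direction and transfer length.
 */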
4099 static int beiscsi_task_xmit(struct iscsi_task *task)
4100 {
4101 struct beiscsi_io_task *io_task = task->dd_data;
4102 struct scsi_cmnd *sc = task->sc;
4103 struct scatterlist *sg;
4104 int num_sg;
4105 unsigned int writedir = 0, xferlen = 0;
4107 if (!sc)
4108 return beiscsi_mtask(task);
4110 io_task->scsi_cmnd = sc;
4111 num_sg = scsi_dma_map(sc);
4112 if (num_sg < 0) {
4113 SE_DEBUG(DBG_LVL_1, "scsi_dma_map failed\n");
4114 return num_sg;
4115 }
4116 xferlen = scsi_bufflen(sc);
4117 sg = scsi_sglist(sc);
4118 if (sc->sc_data_direction == DMA_TO_DEVICE) {
4119 writedir = 1;
4120 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
4121 task->imm_count);
4122 } else
4123 writedir = 0;
4124 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4125 }
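/**
 * beiscsi_quiesce - common teardown for remove and shutdown
 * @phba: adapter instance going away
 *
 * Disables interrupts, frees the MSI-X or INTx irqs, destroys the
 * workqueue, stops iopoll, cleans the port and frees driver memory,
 * clears the MPU_EP_SEMAPHORE bit set at probe time, then unmaps the
 * PCI BARs and frees the mailbox DMA memory.
 */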
4127 static void beiscsi_quiesce(struct beiscsi_hba *phba)
4128 {
4129 struct hwi_controller *phwi_ctrlr;
4130 struct hwi_context_memory *phwi_context;
4131 struct be_eq_obj *pbe_eq;
4132 unsigned int i, msix_vec;
4133 u8 *real_offset = 0;
4134 u32 value = 0;
4136 phwi_ctrlr = phba->phwi_ctrlr;
4137 phwi_context = phwi_ctrlr->phwi_ctxt;
4138 hwi_disable_intr(phba);
4139 if (phba->msix_enabled) {
4140 for (i = 0; i <= phba->num_cpus; i++) {
4141 msix_vec = phba->msix_entries[i].vector;
4142 free_irq(msix_vec, &phwi_context->be_eq[i]);
4143 kfree(phba->msi_name[i]);
4144 }
4145 } else
4146 if (phba->pcidev->irq)
4147 free_irq(phba->pcidev->irq, phba);
4148 pci_disable_msix(phba->pcidev);
4149 destroy_workqueue(phba->wq);
4150 if (blk_iopoll_enabled)
4151 for (i = 0; i < phba->num_cpus; i++) {
4152 pbe_eq = &phwi_context->be_eq[i];
4153 blk_iopoll_disable(&pbe_eq->iopoll);
4154 }
4156 beiscsi_clean_port(phba);
4157 beiscsi_free_mem(phba);
4158 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4160 value = readl((void *)real_offset);
4162 if (value & 0x00010000) {
4163 value &= 0xfffeffff;
4164 writel(value, (void *)real_offset);
4165 }
4166 beiscsi_unmap_pci_function(phba);
4167 pci_free_consistent(phba->pcidev,
4168 phba->ctrl.mbox_mem_alloced.size,
4169 phba->ctrl.mbox_mem_alloced.va,
4170 phba->ctrl.mbox_mem_alloced.dma);
4171 }
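/**
 * beiscsi_remove - PCI remove callback
 * @pcidev: device being removed
 *
 * Quiesces the adapter, destroys the iSCSI boot kset, unregisters
 * and frees the SCSI host and disables the PCI device.
 */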
4173 static void beiscsi_remove(struct pci_dev *pcidev)
4174 {
4176 struct beiscsi_hba *phba = NULL;
4178 phba = pci_get_drvdata(pcidev);
4179 if (!phba) {
4180 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4181 return;
4182 }
4184 beiscsi_quiesce(phba);
4185 iscsi_boot_destroy_kset(phba->boot_kset);
4186 iscsi_host_remove(phba->shost);
4187 pci_dev_put(phba->pcidev);
4188 iscsi_host_free(phba->shost);
4189 pci_disable_device(pcidev);
4190 }
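/**
 * beiscsi_shutdown - PCI shutdown callback
 * @pcidev: device being shut down
 *
 * Runs the same quiesce path as remove, minus the SCSI host
 * teardown, so the adapter is idle across a reboot or kexec.
 */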
4192 static void beiscsi_shutdown(struct pci_dev *pcidev)
4193 {
4195 struct beiscsi_hba *phba = NULL;
4197 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4198 if (!phba) {
4199 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4200 return;
4201 }
4203 beiscsi_quiesce(phba);
4204 pci_disable_device(pcidev);
4205 }
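/**
 * beiscsi_msix_enable - try to switch the adapter to MSI-X
 * @phba: adapter instance
 *
 * Requests num_cpus + 1 vectors (one per CPU for the I/O event
 * queues plus one extra for mailbox/MCC events).  On failure the
 * driver silently stays on legacy INTx.
 */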
4207 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4208 {
4209 int i, status;
4211 for (i = 0; i <= phba->num_cpus; i++)
4212 phba->msix_entries[i].entry = i;
4214 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4215 (phba->num_cpus + 1));
4216 if (!status)
4217 phba->msix_enabled = true;
4219 return;
4220 }
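/**
 * beiscsi_dev_probe - PCI probe callback
 * @pcidev: device that matched beiscsi_pci_id_table
 * @id: the matching id table entry
 *
 * Enables the device, allocates the host, derives the chip
 * generation from the device id, then brings up MSI-X, the control
 * path, the crashdump-semaphore handshake, firmware config, port
 * rings, MCC tags, workqueue, iopoll and irqs, and finally the boot
 * sysfs info.  Each failure unwinds through the labels at the end.
 */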
4222 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4223 const struct pci_device_id *id)
4224 {
4225 struct beiscsi_hba *phba = NULL;
4226 struct hwi_controller *phwi_ctrlr;
4227 struct hwi_context_memory *phwi_context;
4228 struct be_eq_obj *pbe_eq;
4229 int ret, num_cpus, i;
4230 u8 *real_offset = 0;
4231 u32 value = 0;
4233 ret = beiscsi_enable_pci(pcidev);
4234 if (ret < 0) {
4235 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
4236 " Failed to enable pci device\n");
4237 return ret;
4238 }
4240 phba = beiscsi_hba_alloc(pcidev);
4241 if (!phba) {
4242 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
4243 " Failed in beiscsi_hba_alloc\n");
4244 goto disable_pci;
4245 }
4247 switch (pcidev->device) {
4248 case BE_DEVICE_ID1:
4249 case OC_DEVICE_ID1:
4250 case OC_DEVICE_ID2:
4251 phba->generation = BE_GEN2;
4252 break;
4253 case BE_DEVICE_ID2:
4254 case OC_DEVICE_ID3:
4255 phba->generation = BE_GEN3;
4256 break;
4257 default:
4258 phba->generation = 0;
4259 }
4261 if (enable_msix)
4262 num_cpus = find_num_cpus();
4263 else
4264 num_cpus = 1;
4265 phba->num_cpus = num_cpus;
4266 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
4268 if (enable_msix)
4269 beiscsi_msix_enable(phba);
4270 ret = be_ctrl_init(phba, pcidev);
4271 if (ret) {
4272 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4273 "Failed in be_ctrl_init\n");
4274 goto hba_free;
4275 }
4277 if (!num_hba) {
4278 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4279 value = readl((void *)real_offset);
4280 if (value & 0x00010000) {
4281 gcrashmode++;
4282 shost_printk(KERN_ERR, phba->shost,
4283 "Loading Driver in crashdump mode\n");
4284 ret = beiscsi_cmd_reset_function(phba);
4285 if (ret) {
4286 shost_printk(KERN_ERR, phba->shost,
4287 "Reset Failed. Aborting Crashdump\n");
4288 goto hba_free;
4289 }
4290 ret = be_chk_reset_complete(phba);
4291 if (ret) {
4292 shost_printk(KERN_ERR, phba->shost,
4293 "Failed to get out of reset."
4294 "Aborting Crashdump\n");
4295 goto hba_free;
4296 }
4297 } else {
4298 value |= 0x00010000;
4299 writel(value, (void *)real_offset);
4300 num_hba++;
4301 }
4302 }
4304 spin_lock_init(&phba->io_sgl_lock);
4305 spin_lock_init(&phba->mgmt_sgl_lock);
4306 spin_lock_init(&phba->isr_lock);
4307 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4308 if (ret != 0) {
4309 shost_printk(KERN_ERR, phba->shost,
4310 "Error getting fw config\n");
4311 goto free_port;
4312 }
4313 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
4314 beiscsi_get_params(phba);
4315 phba->shost->can_queue = phba->params.ios_per_ctrl;
4316 ret = beiscsi_init_port(phba);
4317 if (ret < 0) {
4318 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4319 "Failed in beiscsi_init_port\n");
4320 goto free_port;
4321 }
4323 for (i = 0; i < MAX_MCC_CMD ; i++) {
4324 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4325 phba->ctrl.mcc_tag[i] = i + 1;
4326 phba->ctrl.mcc_numtag[i + 1] = 0;
4327 phba->ctrl.mcc_tag_available++;
4328 }
4330 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4332 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4333 phba->shost->host_no);
4334 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4335 if (!phba->wq) {
4336 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4337 "Failed to allocate work queue\n");
4338 goto free_twq;
4339 }
4341 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
4343 phwi_ctrlr = phba->phwi_ctrlr;
4344 phwi_context = phwi_ctrlr->phwi_ctxt;
4345 if (blk_iopoll_enabled) {
4346 for (i = 0; i < phba->num_cpus; i++) {
4347 pbe_eq = &phwi_context->be_eq[i];
4348 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4349 be_iopoll);
4350 blk_iopoll_enable(&pbe_eq->iopoll);
4351 }
4352 }
4353 ret = beiscsi_init_irqs(phba);
4354 if (ret < 0) {
4355 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4356 "Failed to beiscsi_init_irqs\n");
4357 goto free_blkenbld;
4358 }
4359 hwi_enable_intr(phba);
4361 if (beiscsi_setup_boot_info(phba))
4362 /*
4363  * log error but continue, because we may not be using
4364  * iscsi boot.
4365  */
4366 shost_printk(KERN_ERR, phba->shost, "Could not set up "
4367 "iSCSI boot info.\n");
4369 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4370 return 0;
4372 free_blkenbld:
4373 destroy_workqueue(phba->wq);
4374 if (blk_iopoll_enabled)
4375 for (i = 0; i < phba->num_cpus; i++) {
4376 pbe_eq = &phwi_context->be_eq[i];
4377 blk_iopoll_disable(&pbe_eq->iopoll);
4378 }
4379 free_twq:
4380 beiscsi_clean_port(phba);
4381 beiscsi_free_mem(phba);
4382 free_port:
4383 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4385 value = readl((void *)real_offset);
4387 if (value & 0x00010000) {
4388 value &= 0xfffeffff;
4389 writel(value, (void *)real_offset);
4390 }
4392 pci_free_consistent(phba->pcidev,
4393 phba->ctrl.mbox_mem_alloced.size,
4394 phba->ctrl.mbox_mem_alloced.va,
4395 phba->ctrl.mbox_mem_alloced.dma);
4396 beiscsi_unmap_pci_function(phba);
4397 hba_free:
4398 if (phba->msix_enabled)
4399 pci_disable_msix(phba->pcidev);
4400 iscsi_host_remove(phba->shost);
4401 pci_dev_put(phba->pcidev);
4402 iscsi_host_free(phba->shost);
4403 disable_pci:
4404 pci_disable_device(pcidev);
4405 return ret;
4406 }
4408 struct iscsi_transport beiscsi_iscsi_transport = {
4409 .owner = THIS_MODULE,
4410 .name = DRV_NAME,
4411 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4412 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4413 .create_session = beiscsi_session_create,
4414 .destroy_session = beiscsi_session_destroy,
4415 .create_conn = beiscsi_conn_create,
4416 .bind_conn = beiscsi_conn_bind,
4417 .destroy_conn = iscsi_conn_teardown,
4418 .attr_is_visible = be2iscsi_attr_is_visible,
4419 .set_param = beiscsi_set_param,
4420 .get_conn_param = iscsi_conn_get_param,
4421 .get_session_param = iscsi_session_get_param,
4422 .get_host_param = beiscsi_get_host_param,
4423 .start_conn = beiscsi_conn_start,
4424 .stop_conn = iscsi_conn_stop,
4425 .send_pdu = iscsi_conn_send_pdu,
4426 .xmit_task = beiscsi_task_xmit,
4427 .cleanup_task = beiscsi_cleanup_task,
4428 .alloc_pdu = beiscsi_alloc_pdu,
4429 .parse_pdu_itt = beiscsi_parse_pdu,
4430 .get_stats = beiscsi_conn_get_stats,
4431 .get_ep_param = beiscsi_ep_get_param,
4432 .ep_connect = beiscsi_ep_connect,
4433 .ep_poll = beiscsi_ep_poll,
4434 .ep_disconnect = beiscsi_ep_disconnect,
4435 .session_recovery_timedout = iscsi_session_recovery_timedout,
4436 };
4438 static struct pci_driver beiscsi_pci_driver = {
4439 .name = DRV_NAME,
4440 .probe = beiscsi_dev_probe,
4441 .remove = beiscsi_remove,
4442 .shutdown = beiscsi_shutdown,
4443 .id_table = beiscsi_pci_id_table,
4444 };
4447 static int __init beiscsi_module_init(void)
4448 {
4449 int ret;
4451 beiscsi_scsi_transport =
4452 iscsi_register_transport(&beiscsi_iscsi_transport);
4453 if (!beiscsi_scsi_transport) {
4454 SE_DEBUG(DBG_LVL_1,
4455 "beiscsi_module_init - Unable to register beiscsi"
4456 "transport.\n");
4457 return -ENOMEM;
4459 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4460 &beiscsi_iscsi_transport);
4462 ret = pci_register_driver(&beiscsi_pci_driver);
4463 if (ret) {
4464 SE_DEBUG(DBG_LVL_1,
4465 "beiscsi_module_init - Unable to register"
4466 "beiscsi pci driver.\n");
4467 goto unregister_iscsi_transport;
4469 return 0;
4471 unregister_iscsi_transport:
4472 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4473 return ret;
4474 }
4476 static void __exit beiscsi_module_exit(void)
4477 {
4478 pci_unregister_driver(&beiscsi_pci_driver);
4479 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4480 }
4482 module_init(beiscsi_module_init);
4483 module_exit(beiscsi_module_exit);