/* drivers/scsi/be2iscsi/be_main.c */

/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
                 "contiguous memory that can be allocated. "
                 "Range is 16 - 128");
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
        blk_queue_max_segment_size(sdev->request_queue, 65536);
        return 0;
}
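
/*
 * beiscsi_eh_abort - abort a single SCSI command
 * @sc: the command to be aborted
 *
 * Builds a one-entry invalidate table for the command's ICD, posts an
 * MCC invalidate request to the firmware, waits for its completion tag
 * and then hands the abort off to libiscsi via iscsi_eh_abort().
 */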
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
        struct iscsi_cls_session *cls_session;
        struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
        struct beiscsi_io_task *aborted_io_task;
        struct iscsi_conn *conn;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_hba *phba;
        struct iscsi_session *session;
        struct invalidate_command_table *inv_tbl;
        struct be_dma_mem nonemb_cmd;
        unsigned int cid, tag, num_invalidate;

        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;

        spin_lock_bh(&session->lock);
        if (!aborted_task || !aborted_task->sc) {
                /* we raced */
                spin_unlock_bh(&session->lock);
                return SUCCESS;
        }

        aborted_io_task = aborted_task->dd_data;
        if (!aborted_io_task->scsi_cmnd) {
                /* raced or invalid command */
                spin_unlock_bh(&session->lock);
                return SUCCESS;
        }
        spin_unlock_bh(&session->lock);
        conn = aborted_task->conn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;

        /* invalidate iocb */
        cid = beiscsi_conn->beiscsi_conn_cid;
        inv_tbl = phba->inv_tbl;
        memset(inv_tbl, 0x0, sizeof(*inv_tbl));
        inv_tbl->cid = cid;
        inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
        num_invalidate = 1;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
                SE_DEBUG(DBG_LVL_1,
                         "Failed to allocate memory for "
                         "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
                shost_printk(KERN_WARNING, phba->shost,
                             "mgmt_invalidate_icds could not be"
                             " submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                return FAILED;
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                         phba->ctrl.mcc_numtag[tag]);
                free_mcc_tag(&phba->ctrl, tag);
        }
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return iscsi_eh_abort(sc);
}
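
/*
 * beiscsi_eh_device_reset - reset a LUN by invalidating its active commands
 * @sc: a command addressed to the LUN being reset
 *
 * Walks the session command array under the session lock, collects the
 * ICDs of every command queued against the same LUN into the invalidate
 * table, asks the firmware to invalidate them, and then completes the
 * reset through libiscsi's iscsi_eh_device_reset().
 */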
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
        struct iscsi_task *abrt_task;
        struct beiscsi_io_task *abrt_io_task;
        struct iscsi_conn *conn;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_hba *phba;
        struct iscsi_session *session;
        struct iscsi_cls_session *cls_session;
        struct invalidate_command_table *inv_tbl;
        struct be_dma_mem nonemb_cmd;
        unsigned int cid, tag, i, num_invalidate;
        int rc = FAILED;

        /* invalidate iocbs */
        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;
        spin_lock_bh(&session->lock);
        if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
                goto unlock;

        conn = session->leadconn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;
        cid = beiscsi_conn->beiscsi_conn_cid;
        inv_tbl = phba->inv_tbl;
        memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
        num_invalidate = 0;
        for (i = 0; i < conn->session->cmds_max; i++) {
                abrt_task = conn->session->cmds[i];
                abrt_io_task = abrt_task->dd_data;
                if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
                        continue;

                /* only invalidate commands for the LUN being reset */
                if (abrt_task->sc->device->lun != sc->device->lun)
                        continue;

                inv_tbl->cid = cid;
                inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
                num_invalidate++;
                inv_tbl++;
        }
        spin_unlock_bh(&session->lock);
        inv_tbl = phba->inv_tbl;

        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
                SE_DEBUG(DBG_LVL_1,
                         "Failed to allocate memory for "
                         "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
                shost_printk(KERN_WARNING, phba->shost,
                             "mgmt_invalidate_icds could not be"
                             " submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                return FAILED;
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                         phba->ctrl.mcc_numtag[tag]);
                free_mcc_tag(&phba->ctrl, tag);
        }
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return iscsi_eh_device_reset(sc);
unlock:
        spin_unlock_bh(&session->lock);
        return rc;
}
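
/*
 * Show routines for the iscsi_boot_sysfs tree: each formats one attribute
 * of the firmware-provided boot target, initiator or NIC into @buf, and
 * returns -ENOSYS for attributes this driver does not export.
 */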
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        struct mgmt_session_info *boot_sess = &phba->boot_sess;
        struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
                rc = sprintf(str, "%.*s\n",
                             (int)strlen(boot_sess->target_name),
                             (char *)&boot_sess->target_name);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (boot_conn->dest_ipaddr.ip_type == 0x1)
                        rc = sprintf(str, "%pI4\n",
                                     (char *)&boot_conn->dest_ipaddr.ip_address);
                else
                        rc = sprintf(str, "%pI6\n",
                                     (char *)&boot_conn->dest_ipaddr.ip_address);
                break;
        case ISCSI_BOOT_TGT_PORT:
                rc = sprintf(str, "%d\n", boot_conn->dest_port);
                break;

        case ISCSI_BOOT_TGT_CHAP_NAME:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             target_chap_name_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.target_chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             target_secret_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.target_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             intr_chap_name_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.intr_chap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             intr_secret_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.intr_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
                rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
                rc = sprintf(str, "0\n");
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
                rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_ETH_INDEX:
                rc = sprintf(str, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
                rc = beiscsi_get_macaddr(buf, phba);
                if (rc < 0) {
                        SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
                        return rc;
                }
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}
static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
        int rc;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
        case ISCSI_BOOT_TGT_IP_ADDR:
        case ISCSI_BOOT_TGT_PORT:
        case ISCSI_BOOT_TGT_CHAP_NAME:
        case ISCSI_BOOT_TGT_CHAP_SECRET:
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
        case ISCSI_BOOT_TGT_NIC_ASSOC:
        case ISCSI_BOOT_TGT_FLAGS:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
        int rc;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
        int rc;

        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
        case ISCSI_BOOT_ETH_MAC:
        case ISCSI_BOOT_ETH_INDEX:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
        .module = THIS_MODULE,
        .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
        .proc_name = DRV_NAME,
        .queuecommand = iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .slave_configure = beiscsi_slave_configure,
        .target_alloc = iscsi_target_alloc,
        .eh_abort_handler = beiscsi_eh_abort,
        .eh_device_reset_handler = beiscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_session_reset,
        .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
        .can_queue = BE2_IO_DEPTH,
        .this_id = -1,
        .max_sectors = BEISCSI_MAX_SECTORS,
        .cmd_per_lun = BEISCSI_CMD_PER_LUN,
        .use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
        struct beiscsi_hba *phba;
        struct Scsi_Host *shost;

        shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
        if (!shost) {
                dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
                        "iscsi_host_alloc failed\n");
                return NULL;
        }
        shost->dma_boundary = pcidev->dma_mask;
        shost->max_id = BE2_MAX_SESSIONS;
        shost->max_channel = 0;
        shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
        shost->max_lun = BEISCSI_NUM_MAX_LUN;
        shost->transportt = beiscsi_scsi_transport;
        phba = iscsi_host_priv(shost);
        memset(phba, 0, sizeof(*phba));
        phba->shost = shost;
        phba->pcidev = pci_dev_get(pcidev);
        pci_set_drvdata(pcidev, phba);

        if (iscsi_host_add(shost, &phba->pcidev->dev))
                goto free_devices;

        return phba;

free_devices:
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
        return NULL;
}
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
        if (phba->csr_va) {
                iounmap(phba->csr_va);
                phba->csr_va = NULL;
        }
        if (phba->db_va) {
                iounmap(phba->db_va);
                phba->db_va = NULL;
        }
        if (phba->pci_va) {
                iounmap(phba->pci_va);
                phba->pci_va = NULL;
        }
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
                                struct pci_dev *pcidev)
{
        u8 __iomem *addr;
        int pcicfg_reg;

        addr = ioremap_nocache(pci_resource_start(pcidev, 2),
                               pci_resource_len(pcidev, 2));
        if (addr == NULL)
                return -ENOMEM;
        phba->ctrl.csr = addr;
        phba->csr_va = addr;
        phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

        addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
        if (addr == NULL)
                goto pci_map_err;
        phba->ctrl.db = addr;
        phba->db_va = addr;
        phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

        if (phba->generation == BE_GEN2)
                pcicfg_reg = 1;
        else
                pcicfg_reg = 0;

        addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
                               pci_resource_len(pcidev, pcicfg_reg));

        if (addr == NULL)
                goto pci_map_err;
        phba->ctrl.pcicfg = addr;
        phba->pci_va = addr;
        phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
        return 0;

pci_map_err:
        beiscsi_unmap_pci_function(phba);
        return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
        int ret;

        ret = pci_enable_device(pcidev);
        if (ret) {
                dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
                        "failed. Returning -ENODEV\n");
                return ret;
        }

        pci_set_master(pcidev);
        if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
                ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
                        pci_disable_device(pcidev);
                        return ret;
                }
        }
        return 0;
}
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
        int status = 0;

        ctrl->pdev = pdev;
        status = beiscsi_map_pci_bars(phba, pdev);
        if (status)
                return status;
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = pci_alloc_consistent(pdev,
                                                  mbox_mem_alloc->size,
                                                  &mbox_mem_alloc->dma);
        if (!mbox_mem_alloc->va) {
                beiscsi_unmap_pci_function(phba);
                status = -ENOMEM;
                return status;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
        spin_lock_init(&ctrl->mbox_lock);
        spin_lock_init(&phba->ctrl.mcc_lock);
        spin_lock_init(&phba->ctrl.mcc_cq_lock);

        return status;
}

static void beiscsi_get_params(struct beiscsi_hba *phba)
{
        phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
                                    - (phba->fw_config.iscsi_cid_count
                                    + BE2_TMFS
                                    + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
        phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
        phba->params.eq_timer = 64;
        phba->params.num_eq_entries =
            (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
               + BE2_TMFS) / 512) + 1) * 512;
        phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
                                      ? 1024 : phba->params.num_eq_entries;
        SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
                 phba->params.num_eq_entries);
        phba->params.num_cq_entries =
            (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
               + BE2_TMFS) / 512) + 1) * 512;
        phba->params.wrbs_per_cxn = 256;
}
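
/*
 * hwi_ring_eq_db - ring an event queue doorbell
 *
 * The doorbell word packs several bit fields (shift and mask values are
 * assumed to come from the driver headers), roughly:
 *
 *      val  = id & DB_EQ_RING_ID_MASK;
 *      val |= 1 << DB_EQ_REARM_SHIFT;                  (re-arm the EQ)
 *      val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; (entries consumed)
 *
 * and is then written to the doorbell BAR at DB_EQ_OFFSET.
 */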
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
                           unsigned int id, unsigned int clr_interrupt,
                           unsigned int num_processed,
                           unsigned char rearm, unsigned char event)
{
        u32 val = 0;
        val |= id & DB_EQ_RING_ID_MASK;
        if (rearm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clr_interrupt)
                val |= 1 << DB_EQ_CLR_SHIFT;
        if (event)
                val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *mcc;
        unsigned int num_eq_processed;
        struct be_eq_obj *pbe_eq;
        unsigned long flags;

        pbe_eq = dev_id;
        eq = &pbe_eq->q;
        phba = pbe_eq->phba;
        mcc = &phba->ctrl.mcc_obj.cq;
        eqe = queue_tail_node(eq);
        if (!eqe)
                SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

        num_eq_processed = 0;

        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                & EQE_VALID_MASK) {
                if (((eqe->dw[offsetof(struct amap_eq_entry,
                     resource_id) / 32] &
                     EQE_RESID_MASK) >> 16) == mcc->id) {
                        spin_lock_irqsave(&phba->isr_lock, flags);
                        phba->todo_mcc_cq = 1;
                        spin_unlock_irqrestore(&phba->isr_lock, flags);
                }
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
                eqe = queue_tail_node(eq);
                num_eq_processed++;
        }
        if (phba->todo_mcc_cq)
                queue_work(phba->wq, &phba->work_cqs);
        if (num_eq_processed)
                hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

        return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *cq;
        unsigned int num_eq_processed;
        struct be_eq_obj *pbe_eq;
        unsigned long flags;

        pbe_eq = dev_id;
        eq = &pbe_eq->q;
        cq = pbe_eq->cq;
        eqe = queue_tail_node(eq);
        if (!eqe)
                SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

        phba = pbe_eq->phba;
        num_eq_processed = 0;
        if (blk_iopoll_enabled) {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                        & EQE_VALID_MASK) {
                        if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
                                blk_iopoll_sched(&pbe_eq->iopoll);

                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_eq_processed++;
                }
                if (num_eq_processed)
                        hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

                return IRQ_HANDLED;
        } else {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                        & EQE_VALID_MASK) {
                        spin_lock_irqsave(&phba->isr_lock, flags);
                        phba->todo_cq = 1;
                        spin_unlock_irqrestore(&phba->isr_lock, flags);
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_eq_processed++;
                }
                if (phba->todo_cq)
                        queue_work(phba->wq, &phba->work_cqs);

                if (num_eq_processed)
                        hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

                return IRQ_HANDLED;
        }
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *cq;
        struct be_queue_info *mcc;
        unsigned long flags, index;
        unsigned int num_mcceq_processed, num_ioeq_processed;
        struct be_ctrl_info *ctrl;
        struct be_eq_obj *pbe_eq;
        int isr;

        phba = dev_id;
        ctrl = &phba->ctrl;
        isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
                       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
        if (!isr)
                return IRQ_NONE;

        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
        pbe_eq = &phwi_context->be_eq[0];

        eq = &phwi_context->be_eq[0].q;
        mcc = &phba->ctrl.mcc_obj.cq;
        index = 0;
        eqe = queue_tail_node(eq);
        if (!eqe)
                SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

        num_ioeq_processed = 0;
        num_mcceq_processed = 0;
        if (blk_iopoll_enabled) {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                        & EQE_VALID_MASK) {
                        if (((eqe->dw[offsetof(struct amap_eq_entry,
                             resource_id) / 32] &
                             EQE_RESID_MASK) >> 16) == mcc->id) {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                phba->todo_mcc_cq = 1;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                                num_mcceq_processed++;
                        } else {
                                if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
                                        blk_iopoll_sched(&pbe_eq->iopoll);
                                num_ioeq_processed++;
                        }
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                }
                if (num_ioeq_processed || num_mcceq_processed) {
                        if (phba->todo_mcc_cq)
                                queue_work(phba->wq, &phba->work_cqs);

                        if ((num_mcceq_processed) && (!num_ioeq_processed))
                                hwi_ring_eq_db(phba, eq->id, 0,
                                              (num_ioeq_processed +
                                               num_mcceq_processed) , 1, 1);
                        else
                                hwi_ring_eq_db(phba, eq->id, 0,
                                              (num_ioeq_processed +
                                               num_mcceq_processed), 0, 1);

                        return IRQ_HANDLED;
                } else
                        return IRQ_NONE;
        } else {
                cq = &phwi_context->be_cq[0];
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                        & EQE_VALID_MASK) {

                        if (((eqe->dw[offsetof(struct amap_eq_entry,
                             resource_id) / 32] &
                             EQE_RESID_MASK) >> 16) != cq->id) {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                phba->todo_mcc_cq = 1;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                        } else {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                phba->todo_cq = 1;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                        }
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_ioeq_processed++;
                }
                if (phba->todo_cq || phba->todo_mcc_cq)
                        queue_work(phba->wq, &phba->work_cqs);

                if (num_ioeq_processed) {
                        hwi_ring_eq_db(phba, eq->id, 0,
                                       num_ioeq_processed, 1, 1);
                        return IRQ_HANDLED;
                } else
                        return IRQ_NONE;
        }
}
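
/*
 * beiscsi_init_irqs - register interrupt handlers
 * @phba: the host adapter
 *
 * In MSI-X mode one vector per CPU is bound to be_isr_msix() for the I/O
 * event queues and one extra vector to be_isr_mcc() for management
 * completions; otherwise a single shared INTx line is bound to be_isr().
 * On a partial failure the already-requested vectors are freed again.
 */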
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
        struct pci_dev *pcidev = phba->pcidev;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        int ret, msix_vec, i, j;
        char desc[32];

        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;

        if (phba->msix_enabled) {
                for (i = 0; i < phba->num_cpus; i++) {
                        sprintf(desc, "beiscsi_msix_%04x", i);
                        msix_vec = phba->msix_entries[i].vector;
                        ret = request_irq(msix_vec, be_isr_msix, 0, desc,
                                          &phwi_context->be_eq[i]);
                        if (ret) {
                                shost_printk(KERN_ERR, phba->shost,
                                             "beiscsi_init_irqs-Failed to "
                                             "register msix for i = %d\n", i);
                                if (!i)
                                        return ret;
                                goto free_msix_irqs;
                        }
                }
                msix_vec = phba->msix_entries[i].vector;
                ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
                                  &phwi_context->be_eq[i]);
                if (ret) {
                        shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
                                     "Failed to register beiscsi_msix_mcc\n");
                        goto free_msix_irqs;
                }
        } else {
                ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
                                  "beiscsi", phba);
                if (ret) {
                        shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
                                     "Failed to register irq\n");
                        return ret;
                }
        }
        return 0;
free_msix_irqs:
        for (j = i - 1; j >= 0; j--) {
                msix_vec = phba->msix_entries[j].vector;
                free_irq(msix_vec, &phwi_context->be_eq[j]);
        }
        return ret;
}
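
/*
 * hwi_ring_cq_db - ring a completion queue doorbell
 *
 * Same pattern as hwi_ring_eq_db(): the CQ id, an optional rearm bit and
 * the number of consumed CQEs are packed into one 32-bit word and written
 * to DB_CQ_OFFSET in the doorbell BAR.
 */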
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
                           unsigned int id, unsigned int num_processed,
                           unsigned char rearm, unsigned char event)
{
        u32 val = 0;
        val |= id & DB_CQ_RING_ID_MASK;
        if (rearm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}
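
/*
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 *
 * Dispatches on the iSCSI opcode found in the PDU header: NOP-IN data is
 * dropped, login/text responses get their ITT rewritten to the libiscsi
 * ITT saved at task allocation, and anything accepted is completed via
 * __iscsi_complete_pdu() under the session lock.
 */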
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
                          unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
{
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
        struct iscsi_hdr *login_hdr;

        switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
                PDUBASE_OPCODE_MASK) {
        case ISCSI_OP_NOOP_IN:
                pbuffer = NULL;
                buf_len = 0;
                break;
        case ISCSI_OP_ASYNC_EVENT:
                break;
        case ISCSI_OP_REJECT:
                WARN_ON(!pbuffer);
                WARN_ON(!(buf_len == 48));
                SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
                break;
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_TEXT_RSP:
                task = conn->login_task;
                io_task = task->dd_data;
                login_hdr = (struct iscsi_hdr *)ppdu;
                login_hdr->itt = io_task->libiscsi_itt;
                break;
        default:
                shost_printk(KERN_WARNING, phba->shost,
                             "Unrecognized opcode 0x%x in async msg\n",
                             (ppdu->
                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
                              & PDUBASE_OPCODE_MASK));
                return 1;
        }

        spin_lock_bh(&session->lock);
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
        spin_unlock_bh(&session->lock);
        return 0;
}
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
        struct sgl_handle *psgl_handle;

        if (phba->io_sgl_hndl_avbl) {
                SE_DEBUG(DBG_LVL_8,
                         "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
                         phba->io_sgl_alloc_index);
                psgl_handle = phba->io_sgl_hndl_base[phba->
                                                io_sgl_alloc_index];
                phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
                phba->io_sgl_hndl_avbl--;
                if (phba->io_sgl_alloc_index == (phba->params.
                                                 ios_per_ctrl - 1))
                        phba->io_sgl_alloc_index = 0;
                else
                        phba->io_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
        return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
        SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle,io_sgl_free_index=%d\n",
                 phba->io_sgl_free_index);
        if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
                SE_DEBUG(DBG_LVL_8,
                         "Double Free in IO SGL io_sgl_free_index=%d, "
                         "value there=%p\n", phba->io_sgl_free_index,
                         phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
                return;
        }
        phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
        phba->io_sgl_hndl_avbl++;
        if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
                phba->io_sgl_free_index = 0;
        else
                phba->io_sgl_free_index++;
}
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[cid];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
                pwrb_context->wrb_handles_available--;
                if (pwrb_context->alloc_index ==
                                        (phba->params.wrbs_per_cxn - 1))
                        pwrb_context->alloc_index = 0;
                else
                        pwrb_context->alloc_index++;
                pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
                pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
        } else
                pwrb_handle = NULL;
        return pwrb_handle;
}
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
                struct wrb_handle *pwrb_handle)
{
        pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
        pwrb_context->wrb_handles_available++;
        if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
                pwrb_context->free_index = 0;
        else
                pwrb_context->free_index++;

        SE_DEBUG(DBG_LVL_8,
                 "FREE WRB: pwrb_handle=%p free_index=0x%x "
                 "wrb_handles_available=%d\n",
                 pwrb_handle, pwrb_context->free_index,
                 pwrb_context->wrb_handles_available);
}
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
        struct sgl_handle *psgl_handle;

        if (phba->eh_sgl_hndl_avbl) {
                psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
                phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
                SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
                         phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
                phba->eh_sgl_hndl_avbl--;
                if (phba->eh_sgl_alloc_index ==
                    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
                     1))
                        phba->eh_sgl_alloc_index = 0;
                else
                        phba->eh_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
        return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
        SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
                 phba->eh_sgl_free_index);
        if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
                SE_DEBUG(DBG_LVL_8,
                         "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
                         phba->eh_sgl_free_index);
                return;
        }
        phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
        phba->eh_sgl_hndl_avbl++;
        if (phba->eh_sgl_free_index ==
            (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
                phba->eh_sgl_free_index = 0;
        else
                phba->eh_sgl_free_index++;
}
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
               struct iscsi_task *task, struct sol_cqe *psol)
{
        struct beiscsi_io_task *io_task = task->dd_data;
        struct be_status_bhs *sts_bhs =
                                (struct be_status_bhs *)io_task->cmd_bhs;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        unsigned int sense_len;
        unsigned char *sense;
        u32 resid = 0, exp_cmdsn, max_cmdsn;
        u8 rsp, status, flags;

        exp_cmdsn = (psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK);
        max_cmdsn = ((psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                                / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
                        & SOL_RESP_MASK) >> 16);
        status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
                        & SOL_STS_MASK) >> 8);
        flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                        & SOL_FLAGS_MASK) >> 24) | 0x80;

        task->sc->result = (DID_OK << 16) | status;
        if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
                task->sc->result = DID_ERROR << 16;
                goto unmap;
        }

        /* bidi not initially supported */
        if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
                resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
                                32] & SOL_RES_CNT_MASK);

                if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
                        task->sc->result = DID_ERROR << 16;

                if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
                        scsi_set_resid(task->sc, resid);
                        if (!status && (scsi_bufflen(task->sc) - resid <
                            task->sc->underflow))
                                task->sc->result = DID_ERROR << 16;
                }
        }

        if (status == SAM_STAT_CHECK_CONDITION) {
                unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
                sense = sts_bhs->sense_info + sizeof(unsigned short);
                sense_len = cpu_to_be16(*slen);
                memcpy(task->sc->sense_buffer, sense,
                       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
        }

        if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
                if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
                    & SOL_RES_CNT_MASK)
                        conn->rxdata_octets += (psol->
                            dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
                            & SOL_RES_CNT_MASK);
        }
unmap:
        scsi_dma_unmap(io_task->scsi_cmnd);
        iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
                   struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_logout_rsp *hdr;
        struct beiscsi_io_task *io_task = task->dd_data;
        struct iscsi_conn *conn = beiscsi_conn->conn;

        hdr = (struct iscsi_logout_rsp *)task->hdr;
        hdr->opcode = ISCSI_OP_LOGOUT_RSP;
        hdr->t2wait = 5;
        hdr->t2retain = 0;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                        & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
                        32] & SOL_RESP_MASK);
        hdr->exp_cmdsn = cpu_to_be32(psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                        / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
        hdr->dlength[2] = 0;
        hdr->hlength = 0;
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
                struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_tm_rsp *hdr;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct beiscsi_io_task *io_task = task->dd_data;

        hdr = (struct iscsi_tm_rsp *)task->hdr;
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                        & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
                        32] & SOL_RESP_MASK);
        hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                        / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
                       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
        struct hwi_wrb_context *pwrb_context;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[((psol->
                                dw[offsetof(struct amap_sol_cqe, cid) / 32] &
                                SOL_CID_MASK) >> 6) -
                                phba->fw_config.iscsi_cid_start];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
                                dw[offsetof(struct amap_sol_cqe, wrb_index) /
                                32] & SOL_WRB_INDEX_MASK) >> 16)];
        task = pwrb_handle->pio_handle;

        io_task = task->dd_data;
        spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
        spin_unlock(&phba->mgmt_sgl_lock);
        spin_lock_bh(&session->lock);
        free_wrb_handle(phba, pwrb_context, pwrb_handle);
        spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
                       struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_nopin *hdr;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct beiscsi_io_task *io_task = task->dd_data;

        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                        & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                        / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
                             struct beiscsi_hba *phba, struct sol_cqe *psol)
{
        struct hwi_wrb_context *pwrb_context;
        struct wrb_handle *pwrb_handle;
        struct iscsi_wrb *pwrb = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        unsigned int type;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
                                (struct amap_sol_cqe, cid) / 32]
                                & SOL_CID_MASK) >> 6) -
                                phba->fw_config.iscsi_cid_start];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
                                dw[offsetof(struct amap_sol_cqe, wrb_index) /
                                32] & SOL_WRB_INDEX_MASK) >> 16)];
        task = pwrb_handle->pio_handle;
        pwrb = pwrb_handle->pwrb;
        type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
                WRB_TYPE_MASK) >> 28;

        spin_lock_bh(&session->lock);
        switch (type) {
        case HWH_TYPE_IO:
        case HWH_TYPE_IO_RD:
                if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
                     ISCSI_OP_NOOP_OUT)
                        be_complete_nopin_resp(beiscsi_conn, task, psol);
                else
                        be_complete_io(beiscsi_conn, task, psol);
                break;

        case HWH_TYPE_LOGOUT:
                if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
                        be_complete_logout(beiscsi_conn, task, psol);
                else
                        be_complete_tmf(beiscsi_conn, task, psol);

                break;

        case HWH_TYPE_LOGIN:
                SE_DEBUG(DBG_LVL_1,
                         "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
                         "- Solicited path\n");
                break;

        case HWH_TYPE_NOP:
                be_complete_nopin_resp(beiscsi_conn, task, psol);
                break;

        default:
                shost_printk(KERN_WARNING, phba->shost,
                             "In hwi_complete_cmd, unknown type = %d "
                             "wrb_index 0x%x CID 0x%x\n", type,
                             ((psol->dw[offsetof(struct amap_iscsi_wrb,
                             type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
                             ((psol->dw[offsetof(struct amap_sol_cqe,
                             cid) / 32] & SOL_CID_MASK) >> 6));
                break;
        }

        spin_unlock_bh(&session->lock);
}
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
                                          *pasync_ctx, unsigned int is_header,
                                          unsigned int host_write_ptr)
{
        if (is_header)
                return &pasync_ctx->async_entry[host_write_ptr].
                    header_busy_list;
        else
                return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
                     struct beiscsi_conn *beiscsi_conn,
                     struct hwi_async_pdu_context *pasync_ctx,
                     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
        struct be_bus_address phys_addr;
        struct list_head *pbusy_list;
        struct async_pdu_handle *pasync_handle = NULL;
        int buffer_len = 0;
        unsigned char buffer_index = -1;
        unsigned char is_header = 0;

        phys_addr.u.a32.address_lo =
            pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
            ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
              & PDUCQE_DPL_MASK) >> 16);
        phys_addr.u.a32.address_hi =
            pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

        phys_addr.u.a64.address =
            *((unsigned long long *)(&phys_addr.u.a64.address));

        switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
                & PDUCQE_CODE_MASK) {
        case UNSOL_HDR_NOTIFY:
                is_header = 1;

                pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
                        (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK));

                buffer_len = (unsigned int)(phys_addr.u.a64.address -
                        pasync_ctx->async_header.pa_base.u.a64.address);

                buffer_index = buffer_len /
                        pasync_ctx->async_header.buffer_size;

                break;
        case UNSOL_DATA_NOTIFY:
                pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
                        dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK));
                buffer_len = (unsigned long)(phys_addr.u.a64.address -
                        pasync_ctx->async_data.pa_base.u.
                        a64.address);
                buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
                break;
        default:
                pbusy_list = NULL;
                shost_printk(KERN_WARNING, phba->shost,
                             "Unexpected code=%d\n",
                             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                             code) / 32] & PDUCQE_CODE_MASK);
                return NULL;
        }

        WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
        WARN_ON(list_empty(pbusy_list));
        list_for_each_entry(pasync_handle, pbusy_list, link) {
                WARN_ON(pasync_handle->consumed);
                if (pasync_handle->index == buffer_index)
                        break;
        }

        WARN_ON(!pasync_handle);

        pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
                             phba->fw_config.iscsi_cid_start;
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = ((pdpdu_cqe->
                        dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
                        & PDUCQE_DPL_MASK) >> 16);

        *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK);
        return pasync_handle;
}
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
                           unsigned int is_header, unsigned int cq_index)
{
        struct list_head *pbusy_list;
        struct async_pdu_handle *pasync_handle;
        unsigned int num_entries, writables = 0;
        unsigned int *pep_read_ptr, *pwritables;

        if (is_header) {
                pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
                pwritables = &pasync_ctx->async_header.writables;
                num_entries = pasync_ctx->async_header.num_entries;
        } else {
                pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
                pwritables = &pasync_ctx->async_data.writables;
                num_entries = pasync_ctx->async_data.num_entries;
        }

        while ((*pep_read_ptr) != cq_index) {
                (*pep_read_ptr)++;
                *pep_read_ptr = (*pep_read_ptr) % num_entries;

                pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
                                                     *pep_read_ptr);
                if (writables == 0)
                        WARN_ON(list_empty(pbusy_list));

                if (!list_empty(pbusy_list)) {
                        pasync_handle = list_entry(pbusy_list->next,
                                                   struct async_pdu_handle,
                                                   link);
                        WARN_ON(!pasync_handle);
                        pasync_handle->consumed = 1;
                }

                writables++;
        }

        if (!writables) {
                SE_DEBUG(DBG_LVL_1,
                         "Duplicate notification received - index 0x%x!!\n",
                         cq_index);
                WARN_ON(1);
        }

        *pwritables = *pwritables + writables;
        return 0;
}

static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
                                       unsigned int cri)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle, *tmp_handle;
        struct list_head *plist;
        unsigned int i = 0;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        plist = &pasync_ctx->async_entry[cri].wait_queue.list;

        list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
                list_del(&pasync_handle->link);

                if (i == 0) {
                        list_add_tail(&pasync_handle->link,
                                      &pasync_ctx->async_header.free_list);
                        pasync_ctx->async_header.free_entries++;
                        i++;
                } else {
                        list_add_tail(&pasync_handle->link,
                                      &pasync_ctx->async_data.free_list);
                        pasync_ctx->async_data.free_entries++;
                        i++;
                }
        }

        INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
        pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
        pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
        return 0;
}
static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
                     unsigned int is_header, unsigned int host_write_ptr)
{
        struct phys_addr *pasync_sge = NULL;

        if (is_header)
                pasync_sge = pasync_ctx->async_header.ring_base;
        else
                pasync_sge = pasync_ctx->async_data.ring_base;

        return pasync_sge + host_write_ptr;
}
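
/*
 * hwi_post_async_buffers - replenish the default PDU header or data ring
 *
 * Moves free handles (in multiples of 8) onto the busy lists, writes their
 * bus addresses into the ring SGEs and rings the RXULP0 doorbell to tell
 * the firmware how many buffers were posted.
 */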
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
                                   unsigned int is_header)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle;
        struct list_head *pfree_link, *pbusy_list;
        struct phys_addr *pasync_sge;
        unsigned int ring_id, num_entries;
        unsigned int host_write_num;
        unsigned int writables;
        unsigned int i = 0;
        u32 doorbell = 0;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        if (is_header) {
                num_entries = pasync_ctx->async_header.num_entries;
                writables = min(pasync_ctx->async_header.writables,
                                pasync_ctx->async_header.free_entries);
                pfree_link = pasync_ctx->async_header.free_list.next;
                host_write_num = pasync_ctx->async_header.host_write_ptr;
                ring_id = phwi_ctrlr->default_pdu_hdr.id;
        } else {
                num_entries = pasync_ctx->async_data.num_entries;
                writables = min(pasync_ctx->async_data.writables,
                                pasync_ctx->async_data.free_entries);
                pfree_link = pasync_ctx->async_data.free_list.next;
                host_write_num = pasync_ctx->async_data.host_write_ptr;
                ring_id = phwi_ctrlr->default_pdu_data.id;
        }

        writables = (writables / 8) * 8;
        if (writables) {
                for (i = 0; i < writables; i++) {
                        pbusy_list =
                            hwi_get_async_busy_list(pasync_ctx, is_header,
                                                    host_write_num);
                        pasync_handle =
                            list_entry(pfree_link, struct async_pdu_handle,
                                       link);
                        WARN_ON(!pasync_handle);
                        pasync_handle->consumed = 0;

                        pfree_link = pfree_link->next;

                        pasync_sge = hwi_get_ring_address(pasync_ctx,
                                                is_header, host_write_num);

                        pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
                        pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

                        list_move(&pasync_handle->link, pbusy_list);

                        host_write_num++;
                        host_write_num = host_write_num % num_entries;
                }

                if (is_header) {
                        pasync_ctx->async_header.host_write_ptr =
                                        host_write_num;
                        pasync_ctx->async_header.free_entries -= writables;
                        pasync_ctx->async_header.writables -= writables;
                        pasync_ctx->async_header.busy_entries += writables;
                } else {
                        pasync_ctx->async_data.host_write_ptr = host_write_num;
                        pasync_ctx->async_data.free_entries -= writables;
                        pasync_ctx->async_data.writables -= writables;
                        pasync_ctx->async_data.busy_entries += writables;
                }

                doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
                doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
                doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
                doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
                                        << DB_DEF_PDU_CQPROC_SHIFT;

                iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
        }
}
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
                                         struct beiscsi_conn *beiscsi_conn,
                                         struct i_t_dpdu_cqe *pdpdu_cqe)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned int cq_index = -1;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
                                             pdpdu_cqe, &cq_index);
        BUG_ON(pasync_handle->is_header != 0);
        if (pasync_handle->consumed == 0)
                hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
                                           cq_index);

        hwi_free_async_msg(phba, pasync_handle->cri);
        hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
                  struct beiscsi_hba *phba,
                  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
        struct list_head *plist;
        struct async_pdu_handle *pasync_handle;
        void *phdr = NULL;
        unsigned int hdr_len = 0, buf_len = 0;
        unsigned int status, index = 0, offset = 0;
        void *pfirst_buffer = NULL;
        unsigned int num_buf = 0;

        plist = &pasync_ctx->async_entry[cri].wait_queue.list;

        list_for_each_entry(pasync_handle, plist, link) {
                if (index == 0) {
                        phdr = pasync_handle->pbuffer;
                        hdr_len = pasync_handle->buffer_len;
                } else {
                        buf_len = pasync_handle->buffer_len;
                        if (!num_buf) {
                                pfirst_buffer = pasync_handle->pbuffer;
                                num_buf++;
                        }
                        memcpy(pfirst_buffer + offset,
                               pasync_handle->pbuffer, buf_len);
                        offset = buf_len;
                }
                index++;
        }

        status = beiscsi_process_async_pdu(beiscsi_conn, phba,
                                           (beiscsi_conn->beiscsi_conn_cid -
                                            phba->fw_config.iscsi_cid_start),
                                           phdr, hdr_len, pfirst_buffer,
                                           buf_len);

        if (status == 0)
                hwi_free_async_msg(phba, cri);
        return 0;
}
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
                     struct beiscsi_hba *phba,
                     struct async_pdu_handle *pasync_handle)
{
        struct hwi_async_pdu_context *pasync_ctx;
        struct hwi_controller *phwi_ctrlr;
        unsigned int bytes_needed = 0, status = 0;
        unsigned short cri = pasync_handle->cri;
        struct pdu_base *ppdu;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        list_del(&pasync_handle->link);
        if (pasync_handle->is_header) {
                pasync_ctx->async_header.busy_entries--;
                if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
                        hwi_free_async_msg(phba, cri);
                        BUG();
                }

                pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
                pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
                pasync_ctx->async_entry[cri].wait_queue.hdr_len =
                                (unsigned short)pasync_handle->buffer_len;
                list_add_tail(&pasync_handle->link,
                              &pasync_ctx->async_entry[cri].wait_queue.list);

                ppdu = pasync_handle->pbuffer;
                bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
                        data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
                        0xFFFF0000) | ((be16_to_cpu((ppdu->
                        dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
                        & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

                if (status == 0) {
                        pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
                                        bytes_needed;

                        if (bytes_needed == 0)
                                status = hwi_fwd_async_msg(beiscsi_conn, phba,
                                                           pasync_ctx, cri);
                }
        } else {
                pasync_ctx->async_data.busy_entries--;
                if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
                        list_add_tail(&pasync_handle->link,
                                      &pasync_ctx->async_entry[cri].wait_queue.
                                      list);
                        pasync_ctx->async_entry[cri].wait_queue.
                                bytes_received +=
                                (unsigned short)pasync_handle->buffer_len;

                        if (pasync_ctx->async_entry[cri].wait_queue.
                            bytes_received >=
                            pasync_ctx->async_entry[cri].wait_queue.
                            bytes_needed)
                                status = hwi_fwd_async_msg(beiscsi_conn, phba,
                                                           pasync_ctx, cri);
                }
        }
        return status;
}

static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
                                         struct beiscsi_hba *phba,
                                         struct i_t_dpdu_cqe *pdpdu_cqe)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned int cq_index = -1;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
        pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
                                             pdpdu_cqe, &cq_index);

        if (pasync_handle->consumed == 0)
                hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
                                           cq_index);
        hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
        hwi_post_async_buffers(phba, pasync_handle->is_header);
}
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
        struct be_queue_info *mcc_cq;
        struct be_mcc_compl *mcc_compl;
        unsigned int num_processed = 0;

        mcc_cq = &phba->ctrl.mcc_obj.cq;
        mcc_compl = queue_tail_node(mcc_cq);
        mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
        while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

                if (num_processed >= 32) {
                        hwi_ring_cq_db(phba, mcc_cq->id,
                                        num_processed, 0, 0);
                        num_processed = 0;
                }
                if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(mcc_compl->flags))
                                /* Interpret compl as a async link evt */
                                beiscsi_async_link_state_process(phba,
                                (struct be_async_event_link_state *) mcc_compl);
                        else
                                SE_DEBUG(DBG_LVL_1,
                                         " Unsupported Async Event, flags"
                                         " = 0x%08x\n", mcc_compl->flags);
                } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
                        atomic_dec(&phba->ctrl.mcc_obj.q.used);
                }

                mcc_compl->flags = 0;
                queue_tail_inc(mcc_cq);
                mcc_compl = queue_tail_node(mcc_cq);
                mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
                num_processed++;
        }

        if (num_processed > 0)
                hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
}
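
/*
 * beiscsi_process_cq - reap completions from an I/O completion queue
 *
 * Walks valid CQEs, looks up the owning connection from the CID carried
 * in each entry and dispatches on the completion code: solicited command
 * completions, driver messages, unsolicited header/data notifications,
 * and a family of error codes that are either logged or tear the
 * connection down via iscsi_conn_failure(). The CQ doorbell is rung
 * every 32 entries and once more at the end with the rearm bit set.
 */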
1797 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1799 struct be_queue_info *cq;
1800 struct sol_cqe *sol;
1801 struct dmsg_cqe *dmsg;
1802 unsigned int num_processed = 0;
1803 unsigned int tot_nump = 0;
1804 struct beiscsi_conn *beiscsi_conn;
1805 struct beiscsi_endpoint *beiscsi_ep;
1806 struct iscsi_endpoint *ep;
1807 struct beiscsi_hba *phba;
1809 cq = pbe_eq->cq;
1810 sol = queue_tail_node(cq);
1811 phba = pbe_eq->phba;
1813 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1814 CQE_VALID_MASK) {
1815 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1817 ep = phba->ep_array[(u32) ((sol->
1818 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1819 SOL_CID_MASK) >> 6) -
1820 phba->fw_config.iscsi_cid_start];
1822 beiscsi_ep = ep->dd_data;
1823 beiscsi_conn = beiscsi_ep->conn;
1825 if (num_processed >= 32) {
1826 hwi_ring_cq_db(phba, cq->id,
1827 num_processed, 0, 0);
1828 tot_nump += num_processed;
1829 num_processed = 0;
1832 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1833 32] & CQE_CODE_MASK) {
1834 case SOL_CMD_COMPLETE:
1835 hwi_complete_cmd(beiscsi_conn, phba, sol);
1836 break;
1837 case DRIVERMSG_NOTIFY:
1838 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
1839 dmsg = (struct dmsg_cqe *)sol;
1840 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1841 break;
1842 case UNSOL_HDR_NOTIFY:
1843 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
1844 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1845 (struct i_t_dpdu_cqe *)sol);
1846 break;
1847 case UNSOL_DATA_NOTIFY:
1848 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1849 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1850 (struct i_t_dpdu_cqe *)sol);
1851 break;
1852 case CXN_INVALIDATE_INDEX_NOTIFY:
1853 case CMD_INVALIDATED_NOTIFY:
1854 case CXN_INVALIDATE_NOTIFY:
1855 SE_DEBUG(DBG_LVL_1,
1856 "Ignoring CQ Error notification for cmd/cxn"
1857 "invalidate\n");
1858 break;
1859 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1860 case CMD_KILLED_INVALID_STATSN_RCVD:
1861 case CMD_KILLED_INVALID_R2T_RCVD:
1862 case CMD_CXN_KILLED_LUN_INVALID:
1863 case CMD_CXN_KILLED_ICD_INVALID:
1864 case CMD_CXN_KILLED_ITT_INVALID:
1865 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1866 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1867 SE_DEBUG(DBG_LVL_1,
1868 "CQ Error notification for cmd.. "
1869 "code %d cid 0x%x\n",
1870 sol->dw[offsetof(struct amap_sol_cqe, code) /
1871 32] & CQE_CODE_MASK,
1872 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1873 32] & SOL_CID_MASK));
1874 break;
1875 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1876 SE_DEBUG(DBG_LVL_1,
1877 "Digest error on def pdu ring, dropping..\n");
1878 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1879 (struct i_t_dpdu_cqe *) sol);
1880 break;
1881 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1882 case CXN_KILLED_BURST_LEN_MISMATCH:
1883 case CXN_KILLED_AHS_RCVD:
1884 case CXN_KILLED_HDR_DIGEST_ERR:
1885 case CXN_KILLED_UNKNOWN_HDR:
1886 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1887 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1888 case CXN_KILLED_TIMED_OUT:
1889 case CXN_KILLED_FIN_RCVD:
1890 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1891 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1892 case CXN_KILLED_OVER_RUN_RESIDUAL:
1893 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1894 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1895 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1896 "0x%x...\n",
1897 sol->dw[offsetof(struct amap_sol_cqe, code) /
1898 32] & CQE_CODE_MASK,
1899 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1900 32] & CQE_CID_MASK));
1901 iscsi_conn_failure(beiscsi_conn->conn,
1902 ISCSI_ERR_CONN_FAILED);
1903 break;
1904 case CXN_KILLED_RST_SENT:
1905 case CXN_KILLED_RST_RCVD:
1906 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1907 "received/sent on CID 0x%x...\n",
1908 sol->dw[offsetof(struct amap_sol_cqe, code) /
1909 32] & CQE_CODE_MASK,
1910 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1911 32] & CQE_CID_MASK));
1912 iscsi_conn_failure(beiscsi_conn->conn,
1913 ISCSI_ERR_CONN_FAILED);
1914 break;
1915 default:
1916 SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1917 "received on CID 0x%x...\n",
1918 sol->dw[offsetof(struct amap_sol_cqe, code) /
1919 32] & CQE_CODE_MASK,
1920 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1921 32] & CQE_CID_MASK));
1922 break;
1923 }
1925 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1926 queue_tail_inc(cq);
1927 sol = queue_tail_node(cq);
1928 num_processed++;
1929 }
1931 if (num_processed > 0) {
1932 tot_nump += num_processed;
1933 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1934 }
1935 return tot_nump;
1936 }
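/*
 * beiscsi_process_all_cqs - deferred CQ processing from the workqueue
 *
 * Runs as phba->work_cqs. Selects be_eq[num_cpus] (the extra MCC vector)
 * when MSI-X is enabled, otherwise be_eq[0], then services whichever of
 * the todo_mcc_cq/todo_cq flags the ISR raised, clearing each flag under
 * isr_lock before processing the corresponding queue.
 */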
1938 void beiscsi_process_all_cqs(struct work_struct *work)
1939 {
1940 unsigned long flags;
1941 struct hwi_controller *phwi_ctrlr;
1942 struct hwi_context_memory *phwi_context;
1943 struct be_eq_obj *pbe_eq;
1944 struct beiscsi_hba *phba =
1945 container_of(work, struct beiscsi_hba, work_cqs);
1947 phwi_ctrlr = phba->phwi_ctrlr;
1948 phwi_context = phwi_ctrlr->phwi_ctxt;
1949 if (phba->msix_enabled)
1950 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1951 else
1952 pbe_eq = &phwi_context->be_eq[0];
1954 if (phba->todo_mcc_cq) {
1955 spin_lock_irqsave(&phba->isr_lock, flags);
1956 phba->todo_mcc_cq = 0;
1957 spin_unlock_irqrestore(&phba->isr_lock, flags);
1958 beiscsi_process_mcc_isr(phba);
1959 }
1961 if (phba->todo_cq) {
1962 spin_lock_irqsave(&phba->isr_lock, flags);
1963 phba->todo_cq = 0;
1964 spin_unlock_irqrestore(&phba->isr_lock, flags);
1965 beiscsi_process_cq(pbe_eq);
1966 }
1967 }
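/*
 * be_iopoll - blk_iopoll callback for softirq-context CQ reaping
 *
 * Processes the EQ's completion queue; when fewer than @budget entries
 * were found, the poll is completed and the EQ doorbell is rung with the
 * rearm bit so the adapter may raise the next interrupt.
 */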
1969 static int be_iopoll(struct blk_iopoll *iop, int budget)
1970 {
1971 unsigned int ret;
1972 struct beiscsi_hba *phba;
1973 struct be_eq_obj *pbe_eq;
1975 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1976 ret = beiscsi_process_cq(pbe_eq);
1977 if (ret < budget) {
1978 phba = pbe_eq->phba;
1979 blk_iopoll_complete(iop);
1980 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1981 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1982 }
1983 return ret;
1984 }
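/*
 * hwi_write_sgl - lay out a DMA-mapped scatterlist for one I/O task
 *
 * Writes the BHS address into the WRB and mirrors the first two
 * fragments into the inline sge0/sge1 of the WRB, flagging whichever of
 * them is last when num_sg <= 2. The full SGL behind psgl_handle->pfrag
 * is then rebuilt: entry 0 describes the BHS, data fragments start at
 * entry 2, and last_sge is set on the final entry.
 */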
1986 static void
1987 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1988 unsigned int num_sg, struct beiscsi_io_task *io_task)
1989 {
1990 struct iscsi_sge *psgl;
1991 unsigned int sg_len, index;
1992 unsigned int sge_len = 0;
1993 unsigned long long addr;
1994 struct scatterlist *l_sg;
1995 unsigned int offset;
1997 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1998 io_task->bhs_pa.u.a32.address_lo);
1999 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2000 io_task->bhs_pa.u.a32.address_hi);
2002 l_sg = sg;
2003 for (index = 0; (index < num_sg) && (index < 2); index++,
2004 sg = sg_next(sg)) {
2005 if (index == 0) {
2006 sg_len = sg_dma_len(sg);
2007 addr = (u64) sg_dma_address(sg);
2008 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2009 ((u32)(addr & 0xFFFFFFFF)));
2010 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2011 ((u32)(addr >> 32)));
2012 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2013 sg_len);
2014 sge_len = sg_len;
2015 } else {
2016 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2017 pwrb, sge_len);
2018 sg_len = sg_dma_len(sg);
2019 addr = (u64) sg_dma_address(sg);
2020 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2021 ((u32)(addr & 0xFFFFFFFF)));
2022 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2023 ((u32)(addr >> 32)));
2024 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2025 sg_len);
2026 }
2027 }
2028 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2029 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2031 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2033 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2034 io_task->bhs_pa.u.a32.address_hi);
2035 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2036 io_task->bhs_pa.u.a32.address_lo);
2038 if (num_sg == 1) {
2039 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2040 1);
2041 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2042 0);
2043 } else if (num_sg == 2) {
2044 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2045 0);
2046 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2047 1);
2048 } else {
2049 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2050 0);
2051 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2052 0);
2053 }
2054 sg = l_sg;
2055 psgl++;
2056 psgl++;
2057 offset = 0;
2058 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2059 sg_len = sg_dma_len(sg);
2060 addr = (u64) sg_dma_address(sg);
2061 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2062 (addr & 0xFFFFFFFF));
2063 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2064 (addr >> 32));
2065 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2066 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2067 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2068 offset += sg_len;
2070 psgl--;
2071 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2072 }
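/*
 * hwi_write_buffer - build the WRB and SGL for a non-I/O (mgmt) task
 *
 * Points the WRB at the task's BHS and, when task->data is present,
 * PCI-maps the flat data buffer into sge0. The backing SGL then gets a
 * BHS entry plus a data entry (fixed 0x106-byte length) that is flagged
 * as the last SGE.
 */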
2074 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2075 {
2076 struct iscsi_sge *psgl;
2077 unsigned long long addr;
2078 struct beiscsi_io_task *io_task = task->dd_data;
2079 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2080 struct beiscsi_hba *phba = beiscsi_conn->phba;
2082 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2083 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2084 io_task->bhs_pa.u.a32.address_lo);
2085 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2086 io_task->bhs_pa.u.a32.address_hi);
2088 if (task->data) {
2089 if (task->data_count) {
2090 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2091 addr = (u64) pci_map_single(phba->pcidev,
2092 task->data,
2093 task->data_count, 1);
2094 } else {
2095 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2096 addr = 0;
2097 }
2098 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2099 ((u32)(addr & 0xFFFFFFFF)));
2100 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2101 ((u32)(addr >> 32)));
2102 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2103 task->data_count);
2105 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2106 } else {
2107 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2108 addr = 0;
2109 }
2111 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2113 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2115 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2116 io_task->bhs_pa.u.a32.address_hi);
2117 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2118 io_task->bhs_pa.u.a32.address_lo);
2119 if (task->data) {
2120 psgl++;
2121 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2122 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2123 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2124 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2125 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2126 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2127 }
2128 psgl++;
2129 if (task->data) {
2130 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2131 ((u32)(addr & 0xFFFFFFFF)));
2132 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2133 ((u32)(addr >> 32)));
2134 }
2135 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2137 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2138 }
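/*
 * beiscsi_find_mem_req - size every HWI memory arena up front
 *
 * Fills phba->mem_req[] with the byte counts needed for WRBs and WRB
 * handles, SGL handles and SGEs, the default-PDU header/data buffers
 * and their rings, and the async-PDU handles and context, all derived
 * from the per-controller parameters in phba->params.
 */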
2140 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2141 {
2142 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2143 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2144 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2146 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2147 sizeof(struct sol_cqe));
2148 num_async_pdu_buf_pages =
2149 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2150 phba->params.defpdu_hdr_sz);
2151 num_async_pdu_buf_sgl_pages =
2152 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2153 sizeof(struct phys_addr));
2154 num_async_pdu_data_pages =
2155 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2156 phba->params.defpdu_data_sz);
2157 num_async_pdu_data_sgl_pages =
2158 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2159 sizeof(struct phys_addr));
2161 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2163 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2164 BE_ISCSI_PDU_HEADER_SIZE;
2165 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2166 sizeof(struct hwi_context_memory);
2169 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2170 * (phba->params.wrbs_per_cxn)
2171 * phba->params.cxns_per_ctrl;
2172 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2173 (phba->params.wrbs_per_cxn);
2174 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2175 phba->params.cxns_per_ctrl);
2177 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2178 phba->params.icds_per_ctrl;
2179 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2180 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2182 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2183 num_async_pdu_buf_pages * PAGE_SIZE;
2184 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2185 num_async_pdu_data_pages * PAGE_SIZE;
2186 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2187 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2188 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2189 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2190 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2191 phba->params.asyncpdus_per_ctrl *
2192 sizeof(struct async_pdu_handle);
2193 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2194 phba->params.asyncpdus_per_ctrl *
2195 sizeof(struct async_pdu_handle);
2196 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2197 sizeof(struct hwi_async_pdu_context) +
2198 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2200 }
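/*
 * beiscsi_alloc_mem - back every arena in phba->mem_req[] with DMA memory
 *
 * Each arena may be split across several fragments (bounded by
 * BEISCSI_MAX_FRAGS_INIT). A fragment allocation starts at
 * min(be_max_phys_size KB, bytes remaining) and falls back to smaller
 * power-of-two sizes on failure; dropping below BE_MIN_MEM_SIZE aborts
 * the whole init, unwinding everything allocated so far via free_mem.
 */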
2201 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2202 {
2203 struct be_mem_descriptor *mem_descr;
2204 dma_addr_t bus_add;
2205 struct mem_array *mem_arr, *mem_arr_orig;
2206 unsigned int i, j, alloc_size, curr_alloc_size;
2208 phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2209 if (!phba->phwi_ctrlr)
2210 return -ENOMEM;
2212 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2213 GFP_KERNEL);
2214 if (!phba->init_mem) {
2215 kfree(phba->phwi_ctrlr);
2216 return -ENOMEM;
2217 }
2219 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2220 GFP_KERNEL);
2221 if (!mem_arr_orig) {
2222 kfree(phba->init_mem);
2223 kfree(phba->phwi_ctrlr);
2224 return -ENOMEM;
2225 }
2227 mem_descr = phba->init_mem;
2228 for (i = 0; i < SE_MEM_MAX; i++) {
2229 j = 0;
2230 mem_arr = mem_arr_orig;
2231 alloc_size = phba->mem_req[i];
2232 memset(mem_arr, 0, sizeof(struct mem_array) *
2233 BEISCSI_MAX_FRAGS_INIT);
2234 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2235 do {
2236 mem_arr->virtual_address = pci_alloc_consistent(
2237 phba->pcidev,
2238 curr_alloc_size,
2239 &bus_add);
2240 if (!mem_arr->virtual_address) {
2241 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2242 goto free_mem;
2243 if (curr_alloc_size -
2244 rounddown_pow_of_two(curr_alloc_size))
2245 curr_alloc_size = rounddown_pow_of_two
2246 (curr_alloc_size);
2247 else
2248 curr_alloc_size = curr_alloc_size / 2;
2249 } else {
2250 mem_arr->bus_address.u.
2251 a64.address = (__u64) bus_add;
2252 mem_arr->size = curr_alloc_size;
2253 alloc_size -= curr_alloc_size;
2254 curr_alloc_size = min(be_max_phys_size *
2255 1024, alloc_size);
2256 j++;
2257 mem_arr++;
2258 }
2259 } while (alloc_size);
2260 mem_descr->num_elements = j;
2261 mem_descr->size_in_bytes = phba->mem_req[i];
2262 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2263 GFP_KERNEL);
2264 if (!mem_descr->mem_array)
2265 goto free_mem;
2267 memcpy(mem_descr->mem_array, mem_arr_orig,
2268 sizeof(struct mem_array) * j);
2269 mem_descr++;
2270 }
2271 kfree(mem_arr_orig);
2272 return 0;
2273 free_mem:
2274 mem_descr->num_elements = j;
2275 while ((i) || (j)) {
2276 for (j = mem_descr->num_elements; j > 0; j--) {
2277 pci_free_consistent(phba->pcidev,
2278 mem_descr->mem_array[j - 1].size,
2279 mem_descr->mem_array[j - 1].
2280 virtual_address,
2281 (unsigned long)mem_descr->
2282 mem_array[j - 1].
2283 bus_address.u.a64.address);
2284 }
2285 if (i) {
2286 i--;
2287 kfree(mem_descr->mem_array);
2288 mem_descr--;
2289 }
2290 }
2291 kfree(mem_arr_orig);
2292 kfree(phba->init_mem);
2293 kfree(phba->phwi_ctrlr);
2294 return -ENOMEM;
2295 }
2297 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2298 {
2299 beiscsi_find_mem_req(phba);
2301 return beiscsi_alloc_mem(phba);
2302 }
2303 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2304 {
2305 struct pdu_data_out *pdata_out;
2306 struct pdu_nop_out *pnop_out;
2307 struct be_mem_descriptor *mem_descr;
2309 mem_descr = phba->init_mem;
2310 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2311 pdata_out =
2312 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2313 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2315 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2316 IIOC_SCSI_DATA);
2318 pnop_out =
2319 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2320 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2322 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2323 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2324 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2325 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2326 }
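/*
 * beiscsi_init_wrb_handle - carve WRBs and WRB handles per connection
 *
 * Splits the HWI_MEM_WRBH arena into wrbs_per_cxn handles for each
 * (even-indexed) wrb_context, then walks the HWI_MEM_WRB arena and
 * points every handle at its iscsi_wrb, advancing to the next memory
 * fragment whenever the current one runs out.
 */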
2328 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2329 {
2330 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2331 struct wrb_handle *pwrb_handle;
2332 struct hwi_controller *phwi_ctrlr;
2333 struct hwi_wrb_context *pwrb_context;
2334 struct iscsi_wrb *pwrb;
2335 unsigned int num_cxn_wrbh;
2336 unsigned int num_cxn_wrb, j, idx, index;
2338 mem_descr_wrbh = phba->init_mem;
2339 mem_descr_wrbh += HWI_MEM_WRBH;
2341 mem_descr_wrb = phba->init_mem;
2342 mem_descr_wrb += HWI_MEM_WRB;
2344 idx = 0;
2345 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2346 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2347 ((sizeof(struct wrb_handle)) *
2348 phba->params.wrbs_per_cxn));
2349 phwi_ctrlr = phba->phwi_ctrlr;
2351 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2352 pwrb_context = &phwi_ctrlr->wrb_context[index];
2353 pwrb_context->pwrb_handle_base =
2354 kzalloc(sizeof(struct wrb_handle *) *
2355 phba->params.wrbs_per_cxn, GFP_KERNEL);
2356 pwrb_context->pwrb_handle_basestd =
2357 kzalloc(sizeof(struct wrb_handle *) *
2358 phba->params.wrbs_per_cxn, GFP_KERNEL);
2359 if (num_cxn_wrbh) {
2360 pwrb_context->alloc_index = 0;
2361 pwrb_context->wrb_handles_available = 0;
2362 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2363 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2364 pwrb_context->pwrb_handle_basestd[j] =
2365 pwrb_handle;
2366 pwrb_context->wrb_handles_available++;
2367 pwrb_handle->wrb_index = j;
2368 pwrb_handle++;
2369 }
2370 pwrb_context->free_index = 0;
2371 num_cxn_wrbh--;
2372 } else {
2373 idx++;
2374 pwrb_handle =
2375 mem_descr_wrbh->mem_array[idx].virtual_address;
2376 num_cxn_wrbh =
2377 ((mem_descr_wrbh->mem_array[idx].size) /
2378 ((sizeof(struct wrb_handle)) *
2379 phba->params.wrbs_per_cxn));
2380 pwrb_context->alloc_index = 0;
2381 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2382 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2383 pwrb_context->pwrb_handle_basestd[j] =
2384 pwrb_handle;
2385 pwrb_context->wrb_handles_available++;
2386 pwrb_handle->wrb_index = j;
2387 pwrb_handle++;
2388 }
2389 pwrb_context->free_index = 0;
2390 num_cxn_wrbh--;
2391 }
2392 }
2393 idx = 0;
2394 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2395 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2396 ((sizeof(struct iscsi_wrb) *
2397 phba->params.wrbs_per_cxn));
2398 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2399 pwrb_context = &phwi_ctrlr->wrb_context[index];
2400 if (num_cxn_wrb) {
2401 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2402 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2403 pwrb_handle->pwrb = pwrb;
2404 pwrb++;
2405 }
2406 num_cxn_wrb--;
2407 } else {
2408 idx++;
2409 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2410 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2411 ((sizeof(struct iscsi_wrb) *
2412 phba->params.wrbs_per_cxn));
2413 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2414 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2415 pwrb_handle->pwrb = pwrb;
2416 pwrb++;
2417 }
2418 num_cxn_wrb--;
2419 }
2420 }
2421 }
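/*
 * hwi_init_async_pdu_ctx - set up the default (async) PDU bookkeeping
 *
 * Overlays the hwi_async_pdu_context structure on the
 * HWI_MEM_ASYNC_PDU_CONTEXT arena, records the header/data buffer pools
 * and their rings, then builds one async_pdu_handle per PDU for both
 * pools, chaining the handles onto the free lists and resetting the
 * ring read/write pointers.
 */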
2423 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2424 {
2425 struct hwi_controller *phwi_ctrlr;
2426 struct hba_parameters *p = &phba->params;
2427 struct hwi_async_pdu_context *pasync_ctx;
2428 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2429 unsigned int index;
2430 struct be_mem_descriptor *mem_descr;
2432 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2433 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2435 phwi_ctrlr = phba->phwi_ctrlr;
2436 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2437 mem_descr->mem_array[0].virtual_address;
2438 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2439 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2441 pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2442 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2443 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2444 pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2446 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2447 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2448 if (mem_descr->mem_array[0].virtual_address) {
2449 SE_DEBUG(DBG_LVL_8,
2450 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2451 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2452 } else
2453 shost_printk(KERN_WARNING, phba->shost,
2454 "No Virtual address\n");
2456 pasync_ctx->async_header.va_base =
2457 mem_descr->mem_array[0].virtual_address;
2459 pasync_ctx->async_header.pa_base.u.a64.address =
2460 mem_descr->mem_array[0].bus_address.u.a64.address;
2462 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2463 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2464 if (mem_descr->mem_array[0].virtual_address) {
2465 SE_DEBUG(DBG_LVL_8,
2466 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2467 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2468 } else
2469 shost_printk(KERN_WARNING, phba->shost,
2470 "No Virtual address\n");
2471 pasync_ctx->async_header.ring_base =
2472 mem_descr->mem_array[0].virtual_address;
2474 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2475 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2476 if (mem_descr->mem_array[0].virtual_address) {
2477 SE_DEBUG(DBG_LVL_8,
2478 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2479 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2480 } else
2481 shost_printk(KERN_WARNING, phba->shost,
2482 "No Virtual address\n");
2484 pasync_ctx->async_header.handle_base =
2485 mem_descr->mem_array[0].virtual_address;
2486 pasync_ctx->async_header.writables = 0;
2487 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2489 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2490 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2491 if (mem_descr->mem_array[0].virtual_address) {
2492 SE_DEBUG(DBG_LVL_8,
2493 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2494 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2495 } else
2496 shost_printk(KERN_WARNING, phba->shost,
2497 "No Virtual address\n");
2498 pasync_ctx->async_data.va_base =
2499 mem_descr->mem_array[0].virtual_address;
2500 pasync_ctx->async_data.pa_base.u.a64.address =
2501 mem_descr->mem_array[0].bus_address.u.a64.address;
2503 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2504 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2505 if (mem_descr->mem_array[0].virtual_address) {
2506 SE_DEBUG(DBG_LVL_8,
2507 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2508 "va=%p\n", mem_descr->mem_array[0].virtual_address);
2509 } else
2510 shost_printk(KERN_WARNING, phba->shost,
2511 "No Virtual address\n");
2513 pasync_ctx->async_data.ring_base =
2514 mem_descr->mem_array[0].virtual_address;
2516 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2517 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2518 if (!mem_descr->mem_array[0].virtual_address)
2519 shost_printk(KERN_WARNING, phba->shost,
2520 "No Virtual address\n");
2522 pasync_ctx->async_data.handle_base =
2523 mem_descr->mem_array[0].virtual_address;
2524 pasync_ctx->async_data.writables = 0;
2525 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2527 pasync_header_h =
2528 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2529 pasync_data_h =
2530 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2532 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2533 pasync_header_h->cri = -1;
2534 pasync_header_h->index = (char)index;
2535 INIT_LIST_HEAD(&pasync_header_h->link);
2536 pasync_header_h->pbuffer =
2537 (void *)((unsigned long)
2538 (pasync_ctx->async_header.va_base) +
2539 (p->defpdu_hdr_sz * index));
2541 pasync_header_h->pa.u.a64.address =
2542 pasync_ctx->async_header.pa_base.u.a64.address +
2543 (p->defpdu_hdr_sz * index);
2545 list_add_tail(&pasync_header_h->link,
2546 &pasync_ctx->async_header.free_list);
2547 pasync_header_h++;
2548 pasync_ctx->async_header.free_entries++;
2549 pasync_ctx->async_header.writables++;
2551 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2552 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2553 header_busy_list);
2554 pasync_data_h->cri = -1;
2555 pasync_data_h->index = (char)index;
2556 INIT_LIST_HEAD(&pasync_data_h->link);
2557 pasync_data_h->pbuffer =
2558 (void *)((unsigned long)
2559 (pasync_ctx->async_data.va_base) +
2560 (p->defpdu_data_sz * index));
2562 pasync_data_h->pa.u.a64.address =
2563 pasync_ctx->async_data.pa_base.u.a64.address +
2564 (p->defpdu_data_sz * index);
2566 list_add_tail(&pasync_data_h->link,
2567 &pasync_ctx->async_data.free_list);
2568 pasync_data_h++;
2569 pasync_ctx->async_data.free_entries++;
2570 pasync_ctx->async_data.writables++;
2572 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2573 }
2575 pasync_ctx->async_header.host_write_ptr = 0;
2576 pasync_ctx->async_header.ep_read_ptr = -1;
2577 pasync_ctx->async_data.host_write_ptr = 0;
2578 pasync_ctx->async_data.ep_read_ptr = -1;
2579 }
2581 static int
2582 be_sgl_create_contiguous(void *virtual_address,
2583 u64 physical_address, u32 length,
2584 struct be_dma_mem *sgl)
2585 {
2586 WARN_ON(!virtual_address);
2587 WARN_ON(!physical_address);
2588 WARN_ON(length == 0);
2589 WARN_ON(!sgl);
2591 sgl->va = virtual_address;
2592 sgl->dma = (unsigned long)physical_address;
2593 sgl->size = length;
2595 return 0;
2596 }
2598 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2599 {
2600 memset(sgl, 0, sizeof(*sgl));
2601 }
2603 static void
2604 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2605 struct mem_array *pmem, struct be_dma_mem *sgl)
2606 {
2607 if (sgl->va)
2608 be_sgl_destroy_contiguous(sgl);
2610 be_sgl_create_contiguous(pmem->virtual_address,
2611 pmem->bus_address.u.a64.address,
2612 pmem->size, sgl);
2613 }
2615 static void
2616 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2617 struct mem_array *pmem, struct be_dma_mem *sgl)
2618 {
2619 if (sgl->va)
2620 be_sgl_destroy_contiguous(sgl);
2622 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2623 pmem->bus_address.u.a64.address,
2624 pmem->size, sgl);
2625 }
2627 static int be_fill_queue(struct be_queue_info *q,
2628 u16 len, u16 entry_size, void *vaddress)
2629 {
2630 struct be_dma_mem *mem = &q->dma_mem;
2632 memset(q, 0, sizeof(*q));
2633 q->len = len;
2634 q->entry_size = entry_size;
2635 mem->size = len * entry_size;
2636 mem->va = vaddress;
2637 if (!mem->va)
2638 return -ENOMEM;
2639 memset(mem->va, 0, mem->size);
2640 return 0;
2641 }
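/*
 * beiscsi_create_eqs - allocate and create the event queues
 *
 * Creates one EQ per CPU, plus a dedicated EQ for the MCC ring when
 * MSI-X is enabled. Each EQ is backed by pci_alloc_consistent() pages
 * and created in the adapter with the current EQ delay; on failure the
 * EQ memory allocated so far is released.
 */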
2643 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2644 struct hwi_context_memory *phwi_context)
2645 {
2646 unsigned int i, num_eq_pages;
2647 int ret, eq_for_mcc;
2648 struct be_queue_info *eq;
2649 struct be_dma_mem *mem;
2650 void *eq_vaddress;
2651 dma_addr_t paddr;
2653 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2654 sizeof(struct be_eq_entry));
2656 if (phba->msix_enabled)
2657 eq_for_mcc = 1;
2658 else
2659 eq_for_mcc = 0;
2660 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2661 eq = &phwi_context->be_eq[i].q;
2662 mem = &eq->dma_mem;
2663 phwi_context->be_eq[i].phba = phba;
2664 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2665 num_eq_pages * PAGE_SIZE,
2666 &paddr);
2667 if (!eq_vaddress)
2668 goto create_eq_error;
2670 mem->va = eq_vaddress;
2671 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2672 sizeof(struct be_eq_entry), eq_vaddress);
2673 if (ret) {
2674 shost_printk(KERN_ERR, phba->shost,
2675 "be_fill_queue Failed for EQ\n");
2676 goto create_eq_error;
2677 }
2679 mem->dma = paddr;
2680 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2681 phwi_context->cur_eqd);
2682 if (ret) {
2683 shost_printk(KERN_ERR, phba->shost,
2684 "beiscsi_cmd_eq_create "
2685 "Failed for EQ\n");
2686 goto create_eq_error;
2687 }
2688 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2689 }
2690 return 0;
2691 create_eq_error:
2692 for (i = 0; i < (phba->num_cpus + 1); i++) {
2693 eq = &phwi_context->be_eq[i].q;
2694 mem = &eq->dma_mem;
2695 if (mem->va)
2696 pci_free_consistent(phba->pcidev, num_eq_pages
2697 * PAGE_SIZE,
2698 mem->va, mem->dma);
2699 }
2700 return ret;
2701 }
2703 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2704 struct hwi_context_memory *phwi_context)
2705 {
2706 unsigned int i, num_cq_pages;
2707 int ret;
2708 struct be_queue_info *cq, *eq;
2709 struct be_dma_mem *mem;
2710 struct be_eq_obj *pbe_eq;
2711 void *cq_vaddress;
2712 dma_addr_t paddr;
2714 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2715 sizeof(struct sol_cqe));
2717 for (i = 0; i < phba->num_cpus; i++) {
2718 cq = &phwi_context->be_cq[i];
2719 eq = &phwi_context->be_eq[i].q;
2720 pbe_eq = &phwi_context->be_eq[i];
2721 pbe_eq->cq = cq;
2722 pbe_eq->phba = phba;
2723 mem = &cq->dma_mem;
2724 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2725 num_cq_pages * PAGE_SIZE,
2726 &paddr);
2727 if (!cq_vaddress)
2728 goto create_cq_error;
2729 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2730 sizeof(struct sol_cqe), cq_vaddress);
2731 if (ret) {
2732 shost_printk(KERN_ERR, phba->shost,
2733 "be_fill_queue Failed for ISCSI CQ\n");
2734 goto create_cq_error;
2735 }
2737 mem->dma = paddr;
2738 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2739 false, 0);
2740 if (ret) {
2741 shost_printk(KERN_ERR, phba->shost,
2742 "beiscsi_cmd_cq_create "
2743 "Failed for ISCSI CQ\n");
2744 goto create_cq_error;
2745 }
2746 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2747 cq->id, eq->id);
2748 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2749 }
2750 return 0;
2752 create_cq_error:
2753 for (i = 0; i < phba->num_cpus; i++) {
2754 cq = &phwi_context->be_cq[i];
2755 mem = &cq->dma_mem;
2756 if (mem->va)
2757 pci_free_consistent(phba->pcidev, num_cq_pages
2758 * PAGE_SIZE,
2759 mem->va, mem->dma);
2760 }
2761 return ret;
2762 }
2765 static int
2766 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2767 struct hwi_context_memory *phwi_context,
2768 struct hwi_controller *phwi_ctrlr,
2769 unsigned int def_pdu_ring_sz)
2770 {
2771 unsigned int idx;
2772 int ret;
2773 struct be_queue_info *dq, *cq;
2774 struct be_dma_mem *mem;
2775 struct be_mem_descriptor *mem_descr;
2776 void *dq_vaddress;
2778 idx = 0;
2779 dq = &phwi_context->be_def_hdrq;
2780 cq = &phwi_context->be_cq[0];
2781 mem = &dq->dma_mem;
2782 mem_descr = phba->init_mem;
2783 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2784 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2785 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2786 sizeof(struct phys_addr),
2787 sizeof(struct phys_addr), dq_vaddress);
2788 if (ret) {
2789 shost_printk(KERN_ERR, phba->shost,
2790 "be_fill_queue Failed for DEF PDU HDR\n");
2791 return ret;
2792 }
2793 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2794 bus_address.u.a64.address;
2795 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2796 def_pdu_ring_sz,
2797 phba->params.defpdu_hdr_sz);
2798 if (ret) {
2799 shost_printk(KERN_ERR, phba->shost,
2800 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2801 return ret;
2802 }
2803 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2804 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2805 phwi_context->be_def_hdrq.id);
2806 hwi_post_async_buffers(phba, 1);
2807 return 0;
2808 }
2810 static int
2811 beiscsi_create_def_data(struct beiscsi_hba *phba,
2812 struct hwi_context_memory *phwi_context,
2813 struct hwi_controller *phwi_ctrlr,
2814 unsigned int def_pdu_ring_sz)
2815 {
2816 unsigned int idx;
2817 int ret;
2818 struct be_queue_info *dataq, *cq;
2819 struct be_dma_mem *mem;
2820 struct be_mem_descriptor *mem_descr;
2821 void *dq_vaddress;
2823 idx = 0;
2824 dataq = &phwi_context->be_def_dataq;
2825 cq = &phwi_context->be_cq[0];
2826 mem = &dataq->dma_mem;
2827 mem_descr = phba->init_mem;
2828 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2829 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2830 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2831 sizeof(struct phys_addr),
2832 sizeof(struct phys_addr), dq_vaddress);
2833 if (ret) {
2834 shost_printk(KERN_ERR, phba->shost,
2835 "be_fill_queue Failed for DEF PDU DATA\n");
2836 return ret;
2837 }
2838 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2839 bus_address.u.a64.address;
2840 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2841 def_pdu_ring_sz,
2842 phba->params.defpdu_data_sz);
2843 if (ret) {
2844 shost_printk(KERN_ERR, phba->shost,
2845 "be_cmd_create_default_pdu_queue Failed"
2846 " for DEF PDU DATA\n");
2847 return ret;
2848 }
2849 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2850 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2851 phwi_context->be_def_dataq.id);
2852 hwi_post_async_buffers(phba, 0);
2853 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2854 return 0;
2855 }
2857 static int
2858 beiscsi_post_pages(struct beiscsi_hba *phba)
2859 {
2860 struct be_mem_descriptor *mem_descr;
2861 struct mem_array *pm_arr;
2862 unsigned int page_offset, i;
2863 struct be_dma_mem sgl;
2864 int status;
2866 mem_descr = phba->init_mem;
2867 mem_descr += HWI_MEM_SGE;
2868 pm_arr = mem_descr->mem_array;
2870 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2871 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2872 for (i = 0; i < mem_descr->num_elements; i++) {
2873 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2874 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2875 page_offset,
2876 (pm_arr->size / PAGE_SIZE));
2877 page_offset += pm_arr->size / PAGE_SIZE;
2878 if (status != 0) {
2879 shost_printk(KERN_ERR, phba->shost,
2880 "post sgl failed.\n");
2881 return status;
2882 }
2883 pm_arr++;
2884 }
2885 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2886 return 0;
2887 }
2889 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2890 {
2891 struct be_dma_mem *mem = &q->dma_mem;
2892 if (mem->va)
2893 pci_free_consistent(phba->pcidev, mem->size,
2894 mem->va, mem->dma);
2895 }
2897 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2898 u16 len, u16 entry_size)
2899 {
2900 struct be_dma_mem *mem = &q->dma_mem;
2902 memset(q, 0, sizeof(*q));
2903 q->len = len;
2904 q->entry_size = entry_size;
2905 mem->size = len * entry_size;
2906 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2907 if (!mem->va)
2908 return -ENOMEM;
2909 memset(mem->va, 0, mem->size);
2910 return 0;
2911 }
2913 static int
2914 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2915 struct hwi_context_memory *phwi_context,
2916 struct hwi_controller *phwi_ctrlr)
2917 {
2918 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2919 u64 pa_addr_lo;
2920 unsigned int idx, num, i;
2921 struct mem_array *pwrb_arr;
2922 void *wrb_vaddr;
2923 struct be_dma_mem sgl;
2924 struct be_mem_descriptor *mem_descr;
2925 int status;
2927 idx = 0;
2928 mem_descr = phba->init_mem;
2929 mem_descr += HWI_MEM_WRB;
2930 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2931 GFP_KERNEL);
2932 if (!pwrb_arr) {
2933 shost_printk(KERN_ERR, phba->shost,
2934 "Memory alloc failed in create wrb ring.\n");
2935 return -ENOMEM;
2936 }
2937 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2938 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2939 num_wrb_rings = mem_descr->mem_array[idx].size /
2940 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2942 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2943 if (num_wrb_rings) {
2944 pwrb_arr[num].virtual_address = wrb_vaddr;
2945 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2946 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2947 sizeof(struct iscsi_wrb);
2948 wrb_vaddr += pwrb_arr[num].size;
2949 pa_addr_lo += pwrb_arr[num].size;
2950 num_wrb_rings--;
2951 } else {
2952 idx++;
2953 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2954 pa_addr_lo = mem_descr->mem_array[idx].\
2955 bus_address.u.a64.address;
2956 num_wrb_rings = mem_descr->mem_array[idx].size /
2957 (phba->params.wrbs_per_cxn *
2958 sizeof(struct iscsi_wrb));
2959 pwrb_arr[num].virtual_address = wrb_vaddr;
2960 pwrb_arr[num].bus_address.u.a64.address\
2961 = pa_addr_lo;
2962 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2963 sizeof(struct iscsi_wrb);
2964 wrb_vaddr += pwrb_arr[num].size;
2965 pa_addr_lo += pwrb_arr[num].size;
2966 num_wrb_rings--;
2967 }
2968 }
2969 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2970 wrb_mem_index = 0;
2971 offset = 0;
2972 size = 0;
2974 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2975 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2976 &phwi_context->be_wrbq[i]);
2977 if (status != 0) {
2978 shost_printk(KERN_ERR, phba->shost,
2979 "wrbq create failed.");
2980 kfree(pwrb_arr);
2981 return status;
2982 }
2983 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2984 id;
2985 }
2986 kfree(pwrb_arr);
2987 return 0;
2988 }
2990 static void free_wrb_handles(struct beiscsi_hba *phba)
2991 {
2992 unsigned int index;
2993 struct hwi_controller *phwi_ctrlr;
2994 struct hwi_wrb_context *pwrb_context;
2996 phwi_ctrlr = phba->phwi_ctrlr;
2997 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2998 pwrb_context = &phwi_ctrlr->wrb_context[index];
2999 kfree(pwrb_context->pwrb_handle_base);
3000 kfree(pwrb_context->pwrb_handle_basestd);
3001 }
3002 }
3004 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3005 {
3006 struct be_queue_info *q;
3007 struct be_ctrl_info *ctrl = &phba->ctrl;
3009 q = &phba->ctrl.mcc_obj.q;
3010 if (q->created)
3011 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3012 be_queue_free(phba, q);
3014 q = &phba->ctrl.mcc_obj.cq;
3015 if (q->created)
3016 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3017 be_queue_free(phba, q);
3018 }
3020 static void hwi_cleanup(struct beiscsi_hba *phba)
3021 {
3022 struct be_queue_info *q;
3023 struct be_ctrl_info *ctrl = &phba->ctrl;
3024 struct hwi_controller *phwi_ctrlr;
3025 struct hwi_context_memory *phwi_context;
3026 int i, eq_num;
3028 phwi_ctrlr = phba->phwi_ctrlr;
3029 phwi_context = phwi_ctrlr->phwi_ctxt;
3030 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3031 q = &phwi_context->be_wrbq[i];
3032 if (q->created)
3033 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3034 }
3035 free_wrb_handles(phba);
3037 q = &phwi_context->be_def_hdrq;
3038 if (q->created)
3039 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3041 q = &phwi_context->be_def_dataq;
3042 if (q->created)
3043 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3045 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3047 for (i = 0; i < (phba->num_cpus); i++) {
3048 q = &phwi_context->be_cq[i];
3049 if (q->created)
3050 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3051 }
3052 if (phba->msix_enabled)
3053 eq_num = 1;
3054 else
3055 eq_num = 0;
3056 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3057 q = &phwi_context->be_eq[i].q;
3058 if (q->created)
3059 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3060 }
3061 be_mcc_queues_destroy(phba);
3062 }
3064 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3065 struct hwi_context_memory *phwi_context)
3066 {
3067 struct be_queue_info *q, *cq;
3068 struct be_ctrl_info *ctrl = &phba->ctrl;
3070 /* Alloc MCC compl queue */
3071 cq = &phba->ctrl.mcc_obj.cq;
3072 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3073 sizeof(struct be_mcc_compl)))
3074 goto err;
3075 /* Ask BE to create MCC compl queue; */
3076 if (phba->msix_enabled) {
3077 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3078 [phba->num_cpus].q, false, true, 0))
3079 goto mcc_cq_free;
3080 } else {
3081 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3082 false, true, 0))
3083 goto mcc_cq_free;
3084 }
3086 /* Alloc MCC queue */
3087 q = &phba->ctrl.mcc_obj.q;
3088 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3089 goto mcc_cq_destroy;
3091 /* Ask BE to create MCC queue */
3092 if (beiscsi_cmd_mccq_create(phba, q, cq))
3093 goto mcc_q_free;
3095 return 0;
3097 mcc_q_free:
3098 be_queue_free(phba, q);
3099 mcc_cq_destroy:
3100 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3101 mcc_cq_free:
3102 be_queue_free(phba, cq);
3103 err:
3104 return -ENOMEM;
3105 }
3107 static int find_num_cpus(void)
3108 {
3109 int num_cpus = 0;
3111 num_cpus = num_online_cpus();
3112 if (num_cpus >= MAX_CPUS)
3113 num_cpus = MAX_CPUS - 1;
3115 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
3116 return num_cpus;
3117 }
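/*
 * hwi_init_port - bring up the adapter's queue infrastructure
 *
 * Runs the ordered bring-up: firmware initialization, EQs, MCC queues,
 * firmware version check, per-CPU CQs, default PDU header and data
 * rings, SGL page posting, and finally the per-connection WRB rings.
 * Any failure unwinds through hwi_cleanup().
 */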
3119 static int hwi_init_port(struct beiscsi_hba *phba)
3120 {
3121 struct hwi_controller *phwi_ctrlr;
3122 struct hwi_context_memory *phwi_context;
3123 unsigned int def_pdu_ring_sz;
3124 struct be_ctrl_info *ctrl = &phba->ctrl;
3125 int status;
3127 def_pdu_ring_sz =
3128 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3129 phwi_ctrlr = phba->phwi_ctrlr;
3130 phwi_context = phwi_ctrlr->phwi_ctxt;
3131 phwi_context->max_eqd = 0;
3132 phwi_context->min_eqd = 0;
3133 phwi_context->cur_eqd = 64;
3134 be_cmd_fw_initialize(&phba->ctrl);
3136 status = beiscsi_create_eqs(phba, phwi_context);
3137 if (status != 0) {
3138 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
3139 goto error;
3140 }
3142 status = be_mcc_queues_create(phba, phwi_context);
3143 if (status != 0)
3144 goto error;
3146 status = mgmt_check_supported_fw(ctrl, phba);
3147 if (status != 0) {
3148 shost_printk(KERN_ERR, phba->shost,
3149 "Unsupported fw version\n");
3150 goto error;
3151 }
3153 status = beiscsi_create_cqs(phba, phwi_context);
3154 if (status != 0) {
3155 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
3156 goto error;
3157 }
3159 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3160 def_pdu_ring_sz);
3161 if (status != 0) {
3162 shost_printk(KERN_ERR, phba->shost,
3163 "Default Header not created\n");
3164 goto error;
3165 }
3167 status = beiscsi_create_def_data(phba, phwi_context,
3168 phwi_ctrlr, def_pdu_ring_sz);
3169 if (status != 0) {
3170 shost_printk(KERN_ERR, phba->shost,
3171 "Default Data not created\n");
3172 goto error;
3173 }
3175 status = beiscsi_post_pages(phba);
3176 if (status != 0) {
3177 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
3178 goto error;
3179 }
3181 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3182 if (status != 0) {
3183 shost_printk(KERN_ERR, phba->shost,
3184 "WRB Rings not created\n");
3185 goto error;
3186 }
3188 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
3189 return 0;
3191 error:
3192 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
3193 hwi_cleanup(phba);
3194 return -ENOMEM;
3195 }
3197 static int hwi_init_controller(struct beiscsi_hba *phba)
3198 {
3199 struct hwi_controller *phwi_ctrlr;
3201 phwi_ctrlr = phba->phwi_ctrlr;
3202 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3203 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3204 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3205 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3206 phwi_ctrlr->phwi_ctxt);
3207 } else {
3208 shost_printk(KERN_ERR, phba->shost,
3209 "HWI_MEM_ADDN_CONTEXT is more than one element."
3210 "Failing to load\n");
3211 return -ENOMEM;
3214 iscsi_init_global_templates(phba);
3215 beiscsi_init_wrb_handle(phba);
3216 hwi_init_async_pdu_ctx(phba);
3217 if (hwi_init_port(phba) != 0) {
3218 shost_printk(KERN_ERR, phba->shost,
3219 "hwi_init_controller failed\n");
3220 return -ENOMEM;
3221 }
3222 return 0;
3223 }
3225 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3226 {
3227 struct be_mem_descriptor *mem_descr;
3228 int i, j;
3230 mem_descr = phba->init_mem;
3231 i = 0;
3232 j = 0;
3233 for (i = 0; i < SE_MEM_MAX; i++) {
3234 for (j = mem_descr->num_elements; j > 0; j--) {
3235 pci_free_consistent(phba->pcidev,
3236 mem_descr->mem_array[j - 1].size,
3237 mem_descr->mem_array[j - 1].virtual_address,
3238 (unsigned long)mem_descr->mem_array[j - 1].
3239 bus_address.u.a64.address);
3240 }
3241 kfree(mem_descr->mem_array);
3242 mem_descr++;
3243 }
3244 kfree(phba->init_mem);
3245 kfree(phba->phwi_ctrlr);
3246 }
3248 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3249 {
3250 int ret = -ENOMEM;
3252 ret = beiscsi_get_memory(phba);
3253 if (ret < 0) {
3254 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3255 "Failed in beiscsi_get_memory\n");
3256 return ret;
3257 }
3259 ret = hwi_init_controller(phba);
3260 if (ret)
3261 goto free_init;
3262 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3263 return 0;
3265 free_init:
3266 beiscsi_free_mem(phba);
3267 return -ENOMEM;
3268 }
3270 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3271 {
3272 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3273 struct sgl_handle *psgl_handle;
3274 struct iscsi_sge *pfrag;
3275 unsigned int arr_index, i, idx;
3277 phba->io_sgl_hndl_avbl = 0;
3278 phba->eh_sgl_hndl_avbl = 0;
3280 mem_descr_sglh = phba->init_mem;
3281 mem_descr_sglh += HWI_MEM_SGLH;
3282 if (1 == mem_descr_sglh->num_elements) {
3283 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3284 phba->params.ios_per_ctrl,
3285 GFP_KERNEL);
3286 if (!phba->io_sgl_hndl_base) {
3287 shost_printk(KERN_ERR, phba->shost,
3288 "Mem Alloc Failed. Failing to load\n");
3289 return -ENOMEM;
3290 }
3291 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3292 (phba->params.icds_per_ctrl -
3293 phba->params.ios_per_ctrl),
3294 GFP_KERNEL);
3295 if (!phba->eh_sgl_hndl_base) {
3296 kfree(phba->io_sgl_hndl_base);
3297 shost_printk(KERN_ERR, phba->shost,
3298 "Mem Alloc Failed. Failing to load\n");
3299 return -ENOMEM;
3300 }
3301 } else {
3302 shost_printk(KERN_ERR, phba->shost,
3303 "HWI_MEM_SGLH is more than one element."
3304 "Failing to load\n");
3305 return -ENOMEM;
3308 arr_index = 0;
3309 idx = 0;
3310 while (idx < mem_descr_sglh->num_elements) {
3311 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3313 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3314 sizeof(struct sgl_handle)); i++) {
3315 if (arr_index < phba->params.ios_per_ctrl) {
3316 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3317 phba->io_sgl_hndl_avbl++;
3318 arr_index++;
3319 } else {
3320 phba->eh_sgl_hndl_base[arr_index -
3321 phba->params.ios_per_ctrl] =
3322 psgl_handle;
3323 arr_index++;
3324 phba->eh_sgl_hndl_avbl++;
3325 }
3326 psgl_handle++;
3327 }
3328 idx++;
3329 }
3330 SE_DEBUG(DBG_LVL_8,
3331 "phba->io_sgl_hndl_avbl=%d"
3332 "phba->eh_sgl_hndl_avbl=%d\n",
3333 phba->io_sgl_hndl_avbl,
3334 phba->eh_sgl_hndl_avbl);
3335 mem_descr_sg = phba->init_mem;
3336 mem_descr_sg += HWI_MEM_SGE;
3337 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
3338 mem_descr_sg->num_elements);
3339 arr_index = 0;
3340 idx = 0;
3341 while (idx < mem_descr_sg->num_elements) {
3342 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3344 for (i = 0;
3345 i < (mem_descr_sg->mem_array[idx].size) /
3346 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3347 i++) {
3348 if (arr_index < phba->params.ios_per_ctrl)
3349 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3350 else
3351 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3352 phba->params.ios_per_ctrl];
3353 psgl_handle->pfrag = pfrag;
3354 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3355 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3356 pfrag += phba->params.num_sge_per_io;
3357 psgl_handle->sgl_index =
3358 phba->fw_config.iscsi_icd_start + arr_index++;
3359 }
3360 idx++;
3361 }
3362 phba->io_sgl_free_index = 0;
3363 phba->io_sgl_alloc_index = 0;
3364 phba->eh_sgl_free_index = 0;
3365 phba->eh_sgl_alloc_index = 0;
3366 return 0;
3367 }
3369 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3370 {
3371 int i, new_cid;
3373 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3374 GFP_KERNEL);
3375 if (!phba->cid_array) {
3376 shost_printk(KERN_ERR, phba->shost,
3377 "Failed to allocate memory in "
3378 "hba_setup_cid_tbls\n");
3379 return -ENOMEM;
3380 }
3381 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3382 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3383 if (!phba->ep_array) {
3384 shost_printk(KERN_ERR, phba->shost,
3385 "Failed to allocate memory in "
3386 "hba_setup_cid_tbls\n");
3387 kfree(phba->cid_array);
3388 return -ENOMEM;
3389 }
3390 new_cid = phba->fw_config.iscsi_cid_start;
3391 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3392 phba->cid_array[i] = new_cid;
3393 new_cid += 2;
3394 }
3395 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3396 return 0;
3397 }
3399 static void hwi_enable_intr(struct beiscsi_hba *phba)
3400 {
3401 struct be_ctrl_info *ctrl = &phba->ctrl;
3402 struct hwi_controller *phwi_ctrlr;
3403 struct hwi_context_memory *phwi_context;
3404 struct be_queue_info *eq;
3405 u8 __iomem *addr;
3406 u32 reg, i;
3407 u32 enabled;
3409 phwi_ctrlr = phba->phwi_ctrlr;
3410 phwi_context = phwi_ctrlr->phwi_ctxt;
3412 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3413 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3414 reg = ioread32(addr);
3416 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3417 if (!enabled) {
3418 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3419 SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3420 iowrite32(reg, addr);
3421 }
3423 if (!phba->msix_enabled) {
3424 eq = &phwi_context->be_eq[0].q;
3425 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3426 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3427 } else {
3428 for (i = 0; i <= phba->num_cpus; i++) {
3429 eq = &phwi_context->be_eq[i].q;
3430 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3431 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3432 }
3433 }
3434 }
3436 static void hwi_disable_intr(struct beiscsi_hba *phba)
3437 {
3438 struct be_ctrl_info *ctrl = &phba->ctrl;
3440 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3441 u32 reg = ioread32(addr);
3443 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3444 if (enabled) {
3445 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3446 iowrite32(reg, addr);
3447 } else
3448 shost_printk(KERN_WARNING, phba->shost,
3449 "In hwi_disable_intr, Already Disabled\n");
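/*
 * beiscsi_get_boot_info - cache boot-session details from the firmware
 *
 * Issues the get-boot-target MCC command and, when the firmware reports
 * a valid boot session handle, fetches the session information into a
 * DMA buffer and copies it to phba->boot_sess for the iscsi_boot_sysfs
 * kobjects created in beiscsi_setup_boot_info().
 */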
3452 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3453 {
3454 struct be_cmd_resp_get_boot_target *boot_resp;
3455 struct be_cmd_resp_get_session *session_resp;
3456 struct be_mcc_wrb *wrb;
3457 struct be_dma_mem nonemb_cmd;
3458 unsigned int tag, wrb_num;
3459 unsigned short status, extd_status;
3460 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3461 int ret = -ENOMEM;
3463 tag = beiscsi_get_boot_target(phba);
3464 if (!tag) {
3465 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed\n");
3466 return -EAGAIN;
3467 } else
3468 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3469 phba->ctrl.mcc_numtag[tag]);
3471 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3472 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3473 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3474 if (status || extd_status) {
3475 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed"
3476 " status = %d extd_status = %d\n",
3477 status, extd_status);
3478 free_mcc_tag(&phba->ctrl, tag);
3479 return -EBUSY;
3480 }
3481 wrb = queue_get_wrb(mccq, wrb_num);
3482 free_mcc_tag(&phba->ctrl, tag);
3483 boot_resp = embedded_payload(wrb);
3485 if (boot_resp->boot_session_handle < 0) {
3486 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3487 return -ENXIO;
3488 }
3490 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3491 sizeof(*session_resp),
3492 &nonemb_cmd.dma);
3493 if (nonemb_cmd.va == NULL) {
3494 SE_DEBUG(DBG_LVL_1,
3495 "Failed to allocate memory for"
3496 "beiscsi_get_session_info\n");
3497 return -ENOMEM;
3500 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3501 tag = beiscsi_get_session_info(phba,
3502 boot_resp->boot_session_handle, &nonemb_cmd);
3503 if (!tag) {
3504 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3505 " Failed\n");
3506 goto boot_freemem;
3507 } else
3508 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3509 phba->ctrl.mcc_numtag[tag]);
3511 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3512 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3513 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3514 if (status || extd_status) {
3515 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3516 " status = %d extd_status = %d\n",
3517 status, extd_status);
3518 free_mcc_tag(&phba->ctrl, tag);
3519 goto boot_freemem;
3520 }
3521 wrb = queue_get_wrb(mccq, wrb_num);
3522 free_mcc_tag(&phba->ctrl, tag);
3523 session_resp = nonemb_cmd.va;
3525 memcpy(&phba->boot_sess, &session_resp->session_info,
3526 sizeof(struct mgmt_session_info));
3527 ret = 0;
3529 boot_freemem:
3530 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3531 nonemb_cmd.va, nonemb_cmd.dma);
3532 return ret;
3533 }
3535 static void beiscsi_boot_release(void *data)
3536 {
3537 struct beiscsi_hba *phba = data;
3539 scsi_host_put(phba->shost);
3540 }
3542 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3543 {
3544 struct iscsi_boot_kobj *boot_kobj;
3546 /* get boot info using mgmt cmd */
3547 if (beiscsi_get_boot_info(phba))
3548 /* Try to see if we can carry on without this */
3549 return 0;
3551 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3552 if (!phba->boot_kset)
3553 return -ENOMEM;
3555 /* get a ref because the show function will ref the phba */
3556 if (!scsi_host_get(phba->shost))
3557 goto free_kset;
3558 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3559 beiscsi_show_boot_tgt_info,
3560 beiscsi_tgt_get_attr_visibility,
3561 beiscsi_boot_release);
3562 if (!boot_kobj)
3563 goto put_shost;
3565 if (!scsi_host_get(phba->shost))
3566 goto free_kset;
3567 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3568 beiscsi_show_boot_ini_info,
3569 beiscsi_ini_get_attr_visibility,
3570 beiscsi_boot_release);
3571 if (!boot_kobj)
3572 goto put_shost;
3574 if (!scsi_host_get(phba->shost))
3575 goto free_kset;
3576 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3577 beiscsi_show_boot_eth_info,
3578 beiscsi_eth_get_attr_visibility,
3579 beiscsi_boot_release);
3580 if (!boot_kobj)
3581 goto put_shost;
3582 return 0;
3584 put_shost:
3585 scsi_host_put(phba->shost);
3586 free_kset:
3587 iscsi_boot_destroy_kset(phba->boot_kset);
3588 return -ENOMEM;
3589 }
3591 static int beiscsi_init_port(struct beiscsi_hba *phba)
3592 {
3593 int ret;
3595 ret = beiscsi_init_controller(phba);
3596 if (ret < 0) {
3597 shost_printk(KERN_ERR, phba->shost,
3598 "beiscsi_dev_probe - Failed in"
3599 "beiscsi_init_controller\n");
3600 return ret;
3602 ret = beiscsi_init_sgl_handle(phba);
3603 if (ret < 0) {
3604 shost_printk(KERN_ERR, phba->shost,
3605 "beiscsi_dev_probe - Failed in"
3606 "beiscsi_init_sgl_handle\n");
3607 goto do_cleanup_ctrlr;
3610 if (hba_setup_cid_tbls(phba)) {
3611 shost_printk(KERN_ERR, phba->shost,
3612 "Failed in hba_setup_cid_tbls\n");
3613 kfree(phba->io_sgl_hndl_base);
3614 kfree(phba->eh_sgl_hndl_base);
3615 goto do_cleanup_ctrlr;
3616 }
3618 return ret;
3620 do_cleanup_ctrlr:
3621 hwi_cleanup(phba);
3622 return ret;
3623 }
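/*
 * hwi_purge_eq - drain stale entries from every event queue
 *
 * Teardown helper: consumes any valid EQEs left on each EQ (including
 * the MCC EQ under MSI-X) and acknowledges them through the EQ doorbell
 * with the rearm bit set.
 */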
3625 static void hwi_purge_eq(struct beiscsi_hba *phba)
3626 {
3627 struct hwi_controller *phwi_ctrlr;
3628 struct hwi_context_memory *phwi_context;
3629 struct be_queue_info *eq;
3630 struct be_eq_entry *eqe = NULL;
3631 int i, eq_msix;
3632 unsigned int num_processed;
3634 phwi_ctrlr = phba->phwi_ctrlr;
3635 phwi_context = phwi_ctrlr->phwi_ctxt;
3636 if (phba->msix_enabled)
3637 eq_msix = 1;
3638 else
3639 eq_msix = 0;
3641 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3642 eq = &phwi_context->be_eq[i].q;
3643 eqe = queue_tail_node(eq);
3644 num_processed = 0;
3645 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3646 & EQE_VALID_MASK) {
3647 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3648 queue_tail_inc(eq);
3649 eqe = queue_tail_node(eq);
3650 num_processed++;
3651 }
3653 if (num_processed)
3654 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3655 }
3656 }
3658 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3659 {
3660 int mgmt_status;
3662 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3663 if (mgmt_status)
3664 shost_printk(KERN_WARNING, phba->shost,
3665 "mgmt_epfw_cleanup FAILED\n");
3667 hwi_purge_eq(phba);
3668 hwi_cleanup(phba);
3669 kfree(phba->io_sgl_hndl_base);
3670 kfree(phba->eh_sgl_hndl_base);
3671 kfree(phba->cid_array);
3672 kfree(phba->ep_array);
3673 }
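/*
 * beiscsi_offload_connection - push negotiated login params to the hw
 *
 * Builds an iscsi_target_context_update_wrb on WRB 0 (reserved by
 * libiscsi for login-time work) carrying the negotiated burst lengths,
 * ERL, digest/R2T/immediate-data flags and exp_statsn, plus the global
 * pad buffer address, then posts it by packing the CID, WRB index and
 * post count into the TXULP0 doorbell.
 */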
3675 void
3676 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3677 struct beiscsi_offload_params *params)
3678 {
3679 struct wrb_handle *pwrb_handle;
3680 struct iscsi_target_context_update_wrb *pwrb = NULL;
3681 struct be_mem_descriptor *mem_descr;
3682 struct beiscsi_hba *phba = beiscsi_conn->phba;
3683 u32 doorbell = 0;
3685 /*
3686 * We can always use 0 here because it is reserved by libiscsi for
3687 * login/startup related tasks.
3688 */
3689 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3690 phba->fw_config.iscsi_cid_start));
3691 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3692 memset(pwrb, 0, sizeof(*pwrb));
3693 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3694 max_burst_length, pwrb, params->dw[offsetof
3695 (struct amap_beiscsi_offload_params,
3696 max_burst_length) / 32]);
3697 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3698 max_send_data_segment_length, pwrb,
3699 params->dw[offsetof(struct amap_beiscsi_offload_params,
3700 max_send_data_segment_length) / 32]);
3701 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3702 first_burst_length,
3703 pwrb,
3704 params->dw[offsetof(struct amap_beiscsi_offload_params,
3705 first_burst_length) / 32]);
3707 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3708 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3709 erl) / 32] & OFFLD_PARAMS_ERL));
3710 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3711 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3712 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3713 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3714 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3715 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3716 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3717 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3718 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3719 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3720 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3721 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3722 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3723 pwrb,
3724 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3725 exp_statsn) / 32] + 1));
3726 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3727 0x7);
3728 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3729 pwrb, pwrb_handle->wrb_index);
3730 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3731 pwrb, pwrb_handle->nxt_wrb_index);
3732 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3733 session_state, pwrb, 0);
3734 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3735 pwrb, 1);
3736 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3737 pwrb, 0);
3738 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3739 0);
3741 mem_descr = phba->init_mem;
3742 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3744 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3745 pad_buffer_addr_hi, pwrb,
3746 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3747 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3748 pad_buffer_addr_lo, pwrb,
3749 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3751 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
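/*
 * Ring the TX doorbell: the low bits carry the connection CID, the
 * WRB index identifies the entry just built, and a posted count of
 * one tells the hardware how many WRBs to fetch.
 */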
3753 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3754 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3755 << DB_DEF_PDU_WRB_INDEX_SHIFT;
3756 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3758 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
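/*
 * libiscsi calls this to map a PDU itt back to its task-table
 * index. beiscsi hands libiscsi's own itt back up on the
 * completion path, so a plain cast is the whole translation.
 */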
3761 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3762 int *index, int *age)
3764 *index = (int)itt;
3765 if (age)
3766 *age = conn->session->age;
3769 /**
3770 * beiscsi_alloc_pdu - allocates pdu and related resources
3771 * @task: libiscsi task
3772 * @opcode: opcode of pdu for task
3773 *
3774 * This is called with the session lock held. It allocates
3775 * the wrb and sgl if needed for the command and preps
3776 * the pdu's itt. beiscsi_parse_pdu will later translate
3777 * the pdu itt to the libiscsi task itt.
3778 */
3779 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3781 struct beiscsi_io_task *io_task = task->dd_data;
3782 struct iscsi_conn *conn = task->conn;
3783 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3784 struct beiscsi_hba *phba = beiscsi_conn->phba;
3785 struct hwi_wrb_context *pwrb_context;
3786 struct hwi_controller *phwi_ctrlr;
3787 itt_t itt;
3788 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3789 dma_addr_t paddr;
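/*
 * Each task gets its basic header segment from the session's DMA
 * pool; GFP_ATOMIC because this runs under the session spinlock.
 */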
3791 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3792 GFP_ATOMIC, &paddr);
3793 if (!io_task->cmd_bhs)
3794 return -ENOMEM;
3795 io_task->bhs_pa.u.a64.address = paddr;
3796 io_task->libiscsi_itt = (itt_t)task->itt;
3797 io_task->conn = beiscsi_conn;
3799 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3800 task->hdr_max = sizeof(struct be_cmd_bhs);
3801 io_task->psgl_handle = NULL;
3802 io_task->pwrb_handle = NULL;
3804 if (task->sc) {
3805 spin_lock(&phba->io_sgl_lock);
3806 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3807 spin_unlock(&phba->io_sgl_lock);
3808 if (!io_task->psgl_handle)
3809 goto free_hndls;
3810 io_task->pwrb_handle = alloc_wrb_handle(phba,
3811 beiscsi_conn->beiscsi_conn_cid -
3812 phba->fw_config.iscsi_cid_start);
3813 if (!io_task->pwrb_handle)
3814 goto free_io_hndls;
3815 } else {
3816 io_task->scsi_cmnd = NULL;
3817 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3818 if (!beiscsi_conn->login_in_progress) {
3819 spin_lock(&phba->mgmt_sgl_lock);
3820 io_task->psgl_handle = (struct sgl_handle *)
3821 alloc_mgmt_sgl_handle(phba);
3822 spin_unlock(&phba->mgmt_sgl_lock);
3823 if (!io_task->psgl_handle)
3824 goto free_hndls;
3826 beiscsi_conn->login_in_progress = 1;
3827 beiscsi_conn->plogin_sgl_handle =
3828 io_task->psgl_handle;
3829 io_task->pwrb_handle =
3830 alloc_wrb_handle(phba,
3831 beiscsi_conn->beiscsi_conn_cid -
3832 phba->fw_config.iscsi_cid_start);
3833 if (!io_task->pwrb_handle)
3834 goto free_io_hndls;
3835 beiscsi_conn->plogin_wrb_handle =
3836 io_task->pwrb_handle;
3838 } else {
3839 io_task->psgl_handle =
3840 beiscsi_conn->plogin_sgl_handle;
3841 io_task->pwrb_handle =
3842 beiscsi_conn->plogin_wrb_handle;
3844 } else {
3845 spin_lock(&phba->mgmt_sgl_lock);
3846 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3847 spin_unlock(&phba->mgmt_sgl_lock);
3848 if (!io_task->psgl_handle)
3849 goto free_hndls;
3850 io_task->pwrb_handle =
3851 alloc_wrb_handle(phba,
3852 beiscsi_conn->beiscsi_conn_cid -
3853 phba->fw_config.iscsi_cid_start);
3854 if (!io_task->pwrb_handle)
3855 goto free_mgmt_hndls;
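/*
 * The itt sent on the wire packs the WRB index into the upper 16
 * bits and the SGL/ICD index into the lower 16, so a completion
 * can be mapped straight back to its resources.
 */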
3859 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3860 wrb_index << 16) | (unsigned int)
3861 (io_task->psgl_handle->sgl_index));
3862 io_task->pwrb_handle->pio_handle = task;
3864 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3865 return 0;
3867 free_io_hndls:
3868 spin_lock(&phba->io_sgl_lock);
3869 free_io_sgl_handle(phba, io_task->psgl_handle);
3870 spin_unlock(&phba->io_sgl_lock);
3871 goto free_hndls;
3872 free_mgmt_hndls:
3873 spin_lock(&phba->mgmt_sgl_lock);
3874 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3875 spin_unlock(&phba->mgmt_sgl_lock);
3876 free_hndls:
3877 phwi_ctrlr = phba->phwi_ctrlr;
3878 pwrb_context = &phwi_ctrlr->wrb_context[
3879 beiscsi_conn->beiscsi_conn_cid -
3880 phba->fw_config.iscsi_cid_start];
3881 if (io_task->pwrb_handle)
3882 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3883 io_task->pwrb_handle = NULL;
3884 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3885 io_task->bhs_pa.u.a64.address);
3886 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3887 return -ENOMEM;
3890 static void beiscsi_cleanup_task(struct iscsi_task *task)
3892 struct beiscsi_io_task *io_task = task->dd_data;
3893 struct iscsi_conn *conn = task->conn;
3894 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3895 struct beiscsi_hba *phba = beiscsi_conn->phba;
3896 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3897 struct hwi_wrb_context *pwrb_context;
3898 struct hwi_controller *phwi_ctrlr;
3900 phwi_ctrlr = phba->phwi_ctrlr;
3901 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3902 - phba->fw_config.iscsi_cid_start];
3903 if (io_task->pwrb_handle) {
3904 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3905 io_task->pwrb_handle = NULL;
3908 if (io_task->cmd_bhs) {
3909 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3910 io_task->bhs_pa.u.a64.address);
3913 if (task->sc) {
3914 if (io_task->psgl_handle) {
3915 spin_lock(&phba->io_sgl_lock);
3916 free_io_sgl_handle(phba, io_task->psgl_handle);
3917 spin_unlock(&phba->io_sgl_lock);
3918 io_task->psgl_handle = NULL;
3920 } else {
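/*
 * Login tasks share a single mgmt SGL/WRB handle for the whole
 * login phase (see beiscsi_alloc_pdu), so it must not be freed
 * per-task here.
 */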
3921 if (task->hdr &&
3922 ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
3923 return;
3924 if (io_task->psgl_handle) {
3925 spin_lock(&phba->mgmt_sgl_lock);
3926 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3927 spin_unlock(&phba->mgmt_sgl_lock);
3928 io_task->psgl_handle = NULL;
3933 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3934 unsigned int num_sg, unsigned int xferlen,
3935 unsigned int writedir)
3938 struct beiscsi_io_task *io_task = task->dd_data;
3939 struct iscsi_conn *conn = task->conn;
3940 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3941 struct beiscsi_hba *phba = beiscsi_conn->phba;
3942 struct iscsi_wrb *pwrb = NULL;
3943 unsigned int doorbell = 0;
3945 pwrb = io_task->pwrb_handle->pwrb;
3946 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3947 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3949 if (writedir) {
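/*
 * For a write, pre-build the Data-Out header the chip will use for
 * solicited data: the 48-byte BHS is cleared, then stamped with
 * the task's itt, the DATA_OUT opcode and the final bit.
 */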
3950 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3951 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3952 &io_task->cmd_bhs->iscsi_data_pdu,
3953 (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3954 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3955 &io_task->cmd_bhs->iscsi_data_pdu,
3956 ISCSI_OPCODE_SCSI_DATA_OUT);
3957 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3958 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3959 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3960 INI_WR_CMD);
3961 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3962 } else {
3963 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3964 INI_RD_CMD);
3965 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3967 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3968 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3969 &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3971 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3972 cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
3973 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3974 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3975 io_task->pwrb_handle->wrb_index);
3976 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3977 be32_to_cpu(task->cmdsn));
3978 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3979 io_task->psgl_handle->sgl_index);
3981 hwi_write_sgl(pwrb, sg, num_sg, io_task);
3983 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3984 io_task->pwrb_handle->nxt_wrb_index);
3985 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3987 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3988 doorbell |= (io_task->pwrb_handle->wrb_index &
3989 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3990 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3992 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3993 return 0;
3996 static int beiscsi_mtask(struct iscsi_task *task)
3998 struct beiscsi_io_task *io_task = task->dd_data;
3999 struct iscsi_conn *conn = task->conn;
4000 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4001 struct beiscsi_hba *phba = beiscsi_conn->phba;
4002 struct iscsi_wrb *pwrb = NULL;
4003 unsigned int doorbell = 0;
4004 unsigned int cid;
4006 cid = beiscsi_conn->beiscsi_conn_cid;
4007 pwrb = io_task->pwrb_handle->pwrb;
4008 memset(pwrb, 0, sizeof(*pwrb));
4009 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4010 be32_to_cpu(task->cmdsn));
4011 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4012 io_task->pwrb_handle->wrb_index);
4013 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4014 io_task->psgl_handle->sgl_index);
4016 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4017 case ISCSI_OP_LOGIN:
4018 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4019 TGT_DM_CMD);
4020 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4021 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4022 hwi_write_buffer(pwrb, task);
4023 break;
4024 case ISCSI_OP_NOOP_OUT:
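/*
 * A TTT other than the reserved tag means this NOP-Out answers a
 * target NOP-In, so it goes out as a target-domain command; an
 * initiator-originated ping is sent as a read-type WRB instead.
 */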
4025 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4026 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4027 TGT_DM_CMD);
4028 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4029 pwrb, 0);
4030 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4031 } else {
4032 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4033 INI_RD_CMD);
4034 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4036 hwi_write_buffer(pwrb, task);
4037 break;
4038 case ISCSI_OP_TEXT:
4039 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4040 TGT_DM_CMD);
4041 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4042 hwi_write_buffer(pwrb, task);
4043 break;
4044 case ISCSI_OP_SCSI_TMFUNC:
4045 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4046 INI_TMF_CMD);
4047 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4048 hwi_write_buffer(pwrb, task);
4049 break;
4050 case ISCSI_OP_LOGOUT:
4051 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4052 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4053 HWH_TYPE_LOGOUT);
4054 hwi_write_buffer(pwrb, task);
4055 break;
4057 default:
4058 SE_DEBUG(DBG_LVL_1, "opcode=%d not supported\n",
4059 task->hdr->opcode & ISCSI_OPCODE_MASK);
4060 return -EINVAL;
4063 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4064 task->data_count);
4065 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4066 io_task->pwrb_handle->nxt_wrb_index);
4067 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4069 doorbell |= cid & DB_WRB_POST_CID_MASK;
4070 doorbell |= (io_task->pwrb_handle->wrb_index &
4071 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4072 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4073 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4074 return 0;
4077 static int beiscsi_task_xmit(struct iscsi_task *task)
4079 struct beiscsi_io_task *io_task = task->dd_data;
4080 struct scsi_cmnd *sc = task->sc;
4081 struct scatterlist *sg;
4082 int num_sg;
4083 unsigned int writedir = 0, xferlen = 0;
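/*
 * No scsi_cmnd attached means this is a management PDU (login,
 * NOP-Out, TMF, logout); everything else is DMA-mapped and sent
 * down the I/O path.
 */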
4085 if (!sc)
4086 return beiscsi_mtask(task);
4088 io_task->scsi_cmnd = sc;
4089 num_sg = scsi_dma_map(sc);
4090 if (num_sg < 0) {
4091 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
4092 return num_sg;
4094 xferlen = scsi_bufflen(sc);
4095 sg = scsi_sglist(sc);
4096 if (sc->sc_data_direction == DMA_TO_DEVICE) {
4097 writedir = 1;
4098 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
4099 task->imm_count);
4100 } else
4101 writedir = 0;
4102 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4105 static void beiscsi_remove(struct pci_dev *pcidev)
4107 struct beiscsi_hba *phba = NULL;
4108 struct hwi_controller *phwi_ctrlr;
4109 struct hwi_context_memory *phwi_context;
4110 struct be_eq_obj *pbe_eq;
4111 unsigned int i, msix_vec;
4112 u8 *real_offset = 0;
4113 u32 value = 0;
4115 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4116 if (!phba) {
4117 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4118 return;
4121 phwi_ctrlr = phba->phwi_ctrlr;
4122 phwi_context = phwi_ctrlr->phwi_ctxt;
4123 hwi_disable_intr(phba);
4124 if (phba->msix_enabled) {
4125 for (i = 0; i <= phba->num_cpus; i++) {
4126 msix_vec = phba->msix_entries[i].vector;
4127 free_irq(msix_vec, &phwi_context->be_eq[i]);
4129 } else
4130 if (phba->pcidev->irq)
4131 free_irq(phba->pcidev->irq, phba);
4132 pci_disable_msix(phba->pcidev);
4133 destroy_workqueue(phba->wq);
4134 if (blk_iopoll_enabled)
4135 for (i = 0; i < phba->num_cpus; i++) {
4136 pbe_eq = &phwi_context->be_eq[i];
4137 blk_iopoll_disable(&pbe_eq->iopoll);
4140 beiscsi_clean_port(phba);
4141 beiscsi_free_mem(phba);
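/*
 * Clear the driver-loaded bit in the MPU endpoint semaphore that
 * probe set for the first HBA, so the next load does not come up
 * in crashdump mode.
 */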
4142 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4144 value = readl((void *)real_offset);
4146 if (value & 0x00010000) {
4147 value &= 0xfffeffff;
4148 writel(value, (void *)real_offset);
4150 beiscsi_unmap_pci_function(phba);
4151 pci_free_consistent(phba->pcidev,
4152 phba->ctrl.mbox_mem_alloced.size,
4153 phba->ctrl.mbox_mem_alloced.va,
4154 phba->ctrl.mbox_mem_alloced.dma);
4155 iscsi_boot_destroy_kset(phba->boot_kset);
4156 iscsi_host_remove(phba->shost);
4157 pci_dev_put(phba->pcidev);
4158 iscsi_host_free(phba->shost);
4161 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4163 int i, status;
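/*
 * One MSI-X vector per CPU event queue plus one extra (hence the
 * <=) for the driver's management (MCC) event queue.
 */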
4165 for (i = 0; i <= phba->num_cpus; i++)
4166 phba->msix_entries[i].entry = i;
4168 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4169 (phba->num_cpus + 1));
4170 if (!status)
4171 phba->msix_enabled = true;
4173 return;
4176 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4177 const struct pci_device_id *id)
4179 struct beiscsi_hba *phba = NULL;
4180 struct hwi_controller *phwi_ctrlr;
4181 struct hwi_context_memory *phwi_context;
4182 struct be_eq_obj *pbe_eq;
4183 int ret, num_cpus, i;
4184 u8 *real_offset = 0;
4185 u32 value = 0;
4187 ret = beiscsi_enable_pci(pcidev);
4188 if (ret < 0) {
4189 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
4190 " Failed to enable pci device\n");
4191 return ret;
4194 phba = beiscsi_hba_alloc(pcidev);
4195 if (!phba) {
4196 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
4197 " Failed in beiscsi_hba_alloc\n");
4198 goto disable_pci;
4201 switch (pcidev->device) {
4202 case BE_DEVICE_ID1:
4203 case OC_DEVICE_ID1:
4204 case OC_DEVICE_ID2:
4205 phba->generation = BE_GEN2;
4206 break;
4207 case BE_DEVICE_ID2:
4208 case OC_DEVICE_ID3:
4209 phba->generation = BE_GEN3;
4210 break;
4211 default:
4212 phba->generation = 0;
4213 }
4215 if (enable_msix)
4216 num_cpus = find_num_cpus();
4217 else
4218 num_cpus = 1;
4219 phba->num_cpus = num_cpus;
4220 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
4222 if (enable_msix)
4223 beiscsi_msix_enable(phba);
4224 ret = be_ctrl_init(phba, pcidev);
4225 if (ret) {
4226 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4227 "Failed in be_ctrl_init\n");
4228 goto hba_free;
4231 if (!num_hba) {
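/*
 * Only the first HBA probed checks the crashdump semaphore: if the
 * driver-loaded bit is already set, a previous kernel died with
 * the driver active, so soft-reset the function before reusing it;
 * otherwise set the bit to mark this load.
 */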
4232 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4233 value = readl((void *)real_offset);
4234 if (value & 0x00010000) {
4235 gcrashmode++;
4236 shost_printk(KERN_ERR, phba->shost,
4237 "Loading Driver in crashdump mode\n");
4238 ret = beiscsi_pci_soft_reset(phba);
4239 if (ret) {
4240 shost_printk(KERN_ERR, phba->shost,
4241 "Reset Failed. Aborting Crashdump\n");
4242 goto hba_free;
4244 ret = be_chk_reset_complete(phba);
4245 if (ret) {
4246 shost_printk(KERN_ERR, phba->shost,
4247 "Failed to get out of reset."
4248 "Aborting Crashdump\n");
4249 goto hba_free;
4251 } else {
4252 value |= 0x00010000;
4253 writel(value, (void *)real_offset);
4254 num_hba++;
4258 spin_lock_init(&phba->io_sgl_lock);
4259 spin_lock_init(&phba->mgmt_sgl_lock);
4260 spin_lock_init(&phba->isr_lock);
4261 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4262 if (ret != 0) {
4263 shost_printk(KERN_ERR, phba->shost,
4264 "Error getting fw config\n");
4265 goto free_port;
4267 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
4268 beiscsi_get_params(phba);
4269 phba->shost->can_queue = phba->params.ios_per_ctrl;
4270 ret = beiscsi_init_port(phba);
4271 if (ret < 0) {
4272 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4273 "Failed in beiscsi_init_port\n");
4274 goto free_port;
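/*
 * Seed the MCC tag pool: tags 1..MAX_MCC_CMD identify outstanding
 * management commands, each with a wait queue the completion path
 * uses to wake the issuer.
 */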
4277 for (i = 0; i < MAX_MCC_CMD ; i++) {
4278 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4279 phba->ctrl.mcc_tag[i] = i + 1;
4280 phba->ctrl.mcc_numtag[i + 1] = 0;
4281 phba->ctrl.mcc_tag_available++;
4284 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4286 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4287 phba->shost->host_no);
4288 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4289 if (!phba->wq) {
4290 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4291 "Failed to allocate work queue\n");
4292 goto free_twq;
4295 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
4297 phwi_ctrlr = phba->phwi_ctrlr;
4298 phwi_context = phwi_ctrlr->phwi_ctxt;
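/*
 * With blk-iopoll enabled, completions are polled from softirq
 * context, bounded per pass by the be_iopoll_budget module
 * parameter.
 */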
4299 if (blk_iopoll_enabled) {
4300 for (i = 0; i < phba->num_cpus; i++) {
4301 pbe_eq = &phwi_context->be_eq[i];
4302 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4303 be_iopoll);
4304 blk_iopoll_enable(&pbe_eq->iopoll);
4307 ret = beiscsi_init_irqs(phba);
4308 if (ret < 0) {
4309 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4310 "Failed to beiscsi_init_irqs\n");
4311 goto free_blkenbld;
4313 hwi_enable_intr(phba);
4315 if (beiscsi_setup_boot_info(phba))
4316 /*
4317 * log error but continue, because we may not be using
4318 * iscsi boot.
4319 */
4320 shost_printk(KERN_ERR, phba->shost, "Could not set up "
4321 "iSCSI boot info.");
4323 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4324 return 0;
4326 free_blkenbld:
4327 destroy_workqueue(phba->wq);
4328 if (blk_iopoll_enabled)
4329 for (i = 0; i < phba->num_cpus; i++) {
4330 pbe_eq = &phwi_context->be_eq[i];
4331 blk_iopoll_disable(&pbe_eq->iopoll);
4333 free_twq:
4334 beiscsi_clean_port(phba);
4335 beiscsi_free_mem(phba);
4336 free_port:
4337 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4339 value = readl((void *)real_offset);
4341 if (value & 0x00010000) {
4342 value &= 0xfffeffff;
4343 writel(value, (void *)real_offset);
4346 pci_free_consistent(phba->pcidev,
4347 phba->ctrl.mbox_mem_alloced.size,
4348 phba->ctrl.mbox_mem_alloced.va,
4349 phba->ctrl.mbox_mem_alloced.dma);
4350 beiscsi_unmap_pci_function(phba);
4351 hba_free:
4352 if (phba->msix_enabled)
4353 pci_disable_msix(phba->pcidev);
4354 iscsi_host_remove(phba->shost);
4355 pci_dev_put(phba->pcidev);
4356 iscsi_host_free(phba->shost);
4357 disable_pci:
4358 pci_disable_device(pcidev);
4359 return ret;
4362 struct iscsi_transport beiscsi_iscsi_transport = {
4363 .owner = THIS_MODULE,
4364 .name = DRV_NAME,
4365 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4366 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4367 .param_mask = ISCSI_MAX_RECV_DLENGTH |
4368 ISCSI_MAX_XMIT_DLENGTH |
4369 ISCSI_HDRDGST_EN |
4370 ISCSI_DATADGST_EN |
4371 ISCSI_INITIAL_R2T_EN |
4372 ISCSI_MAX_R2T |
4373 ISCSI_IMM_DATA_EN |
4374 ISCSI_FIRST_BURST |
4375 ISCSI_MAX_BURST |
4376 ISCSI_PDU_INORDER_EN |
4377 ISCSI_DATASEQ_INORDER_EN |
4378 ISCSI_ERL |
4379 ISCSI_CONN_PORT |
4380 ISCSI_CONN_ADDRESS |
4381 ISCSI_EXP_STATSN |
4382 ISCSI_PERSISTENT_PORT |
4383 ISCSI_PERSISTENT_ADDRESS |
4384 ISCSI_TARGET_NAME | ISCSI_TPGT |
4385 ISCSI_USERNAME | ISCSI_PASSWORD |
4386 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
4387 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
4388 ISCSI_LU_RESET_TMO |
4389 ISCSI_PING_TMO | ISCSI_RECV_TMO |
4390 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
4391 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
4392 ISCSI_HOST_INITIATOR_NAME,
4393 .create_session = beiscsi_session_create,
4394 .destroy_session = beiscsi_session_destroy,
4395 .create_conn = beiscsi_conn_create,
4396 .bind_conn = beiscsi_conn_bind,
4397 .destroy_conn = iscsi_conn_teardown,
4398 .set_param = beiscsi_set_param,
4399 .get_conn_param = iscsi_conn_get_param,
4400 .get_session_param = iscsi_session_get_param,
4401 .get_host_param = beiscsi_get_host_param,
4402 .start_conn = beiscsi_conn_start,
4403 .stop_conn = iscsi_conn_stop,
4404 .send_pdu = iscsi_conn_send_pdu,
4405 .xmit_task = beiscsi_task_xmit,
4406 .cleanup_task = beiscsi_cleanup_task,
4407 .alloc_pdu = beiscsi_alloc_pdu,
4408 .parse_pdu_itt = beiscsi_parse_pdu,
4409 .get_stats = beiscsi_conn_get_stats,
4410 .get_ep_param = beiscsi_ep_get_param,
4411 .ep_connect = beiscsi_ep_connect,
4412 .ep_poll = beiscsi_ep_poll,
4413 .ep_disconnect = beiscsi_ep_disconnect,
4414 .session_recovery_timedout = iscsi_session_recovery_timedout,
4417 static struct pci_driver beiscsi_pci_driver = {
4418 .name = DRV_NAME,
4419 .probe = beiscsi_dev_probe,
4420 .remove = beiscsi_remove,
4421 .id_table = beiscsi_pci_id_table
4425 static int __init beiscsi_module_init(void)
4427 int ret;
4429 beiscsi_scsi_transport =
4430 iscsi_register_transport(&beiscsi_iscsi_transport);
4431 if (!beiscsi_scsi_transport) {
4432 SE_DEBUG(DBG_LVL_1,
4433 "beiscsi_module_init - Unable to register beiscsi"
4434 "transport.\n");
4435 return -ENOMEM;
4437 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4438 &beiscsi_iscsi_transport);
4440 ret = pci_register_driver(&beiscsi_pci_driver);
4441 if (ret) {
4442 SE_DEBUG(DBG_LVL_1,
4443 "beiscsi_module_init - Unable to register"
4444 "beiscsi pci driver.\n");
4445 goto unregister_iscsi_transport;
4447 return 0;
4449 unregister_iscsi_transport:
4450 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4451 return ret;
4454 static void __exit beiscsi_module_exit(void)
4456 pci_unregister_driver(&beiscsi_pci_driver);
4457 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4460 module_init(beiscsi_module_init);
4461 module_exit(beiscsi_module_exit);