1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsicam.h>
15 #include "ql4_def.h"
16 #include "ql4_version.h"
17 #include "ql4_glbl.h"
18 #include "ql4_dbg.h"
19 #include "ql4_inline.h"
21 /*
22 * Driver version
23 */
24 static char qla4xxx_version_str[40];
26 /*
27 * SRB allocation cache
28 */
29 static struct kmem_cache *srb_cachep;
31 /*
32 * Module parameter information and variables
33 */
34 int ql4xdontresethba = 0;
35 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
36 MODULE_PARM_DESC(ql4xdontresethba,
37 "Don't reset the HBA for driver recovery \n"
38 " 0 - It will reset HBA (Default)\n"
39 " 1 - It will NOT reset HBA");
41 int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
42 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
43 MODULE_PARM_DESC(ql4xextended_error_logging,
44 "Option to enable extended error logging, "
45 "Default is 0 - no logging, 1 - debug logging");
47 int ql4xenablemsix = 1;
48 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49 MODULE_PARM_DESC(ql4xenablemsix,
50 "Set to enable MSI or MSI-X interrupt mechanism.\n"
51 " 0 = enable INTx interrupt mechanism.\n"
52 " 1 = enable MSI-X interrupt mechanism (Default).\n"
53 " 2 = enable MSI interrupt mechanism.");
55 #define QL4_DEF_QDEPTH 32
56 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58 MODULE_PARM_DESC(ql4xmaxqdepth,
59 "Maximum queue depth to report for target devices.\n"
60 " Default: 32.");
62 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 "Target Session Recovery Timeout.\n"
66 " Default: 30 sec.");
68 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69 /*
70 * SCSI host template entry points
71 */
72 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
74 /*
75 * iSCSI template entry points
76 */
77 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf);
79 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 enum iscsi_host_param param, char *buf);
81 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
82 int count);
83 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 enum iscsi_param_type param_type,
85 int param, char *buf);
86 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 struct sockaddr *dst_addr,
89 int non_blocking);
90 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 enum iscsi_param param, char *buf);
94 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95 static struct iscsi_cls_conn *
96 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 struct iscsi_cls_conn *cls_conn,
99 uint64_t transport_fd, int is_leading);
100 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101 static struct iscsi_cls_session *
102 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 uint16_t qdepth, uint32_t initial_cmdsn);
104 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105 static void qla4xxx_task_work(struct work_struct *wdata);
106 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107 static int qla4xxx_task_xmit(struct iscsi_task *);
108 static void qla4xxx_task_cleanup(struct iscsi_task *);
109 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 struct iscsi_stats *stats);
112 /*
113 * SCSI host template entry points
114 */
115 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
116 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
117 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
118 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120 static int qla4xxx_slave_alloc(struct scsi_device *device);
121 static int qla4xxx_slave_configure(struct scsi_device *device);
122 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
123 static mode_t ql4_attr_is_visible(int param_type, int param);
124 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
126 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
127 QLA82XX_LEGACY_INTR_CONFIG;
129 static struct scsi_host_template qla4xxx_driver_template = {
130 .module = THIS_MODULE,
131 .name = DRIVER_NAME,
132 .proc_name = DRIVER_NAME,
133 .queuecommand = qla4xxx_queuecommand,
135 .eh_abort_handler = qla4xxx_eh_abort,
136 .eh_device_reset_handler = qla4xxx_eh_device_reset,
137 .eh_target_reset_handler = qla4xxx_eh_target_reset,
138 .eh_host_reset_handler = qla4xxx_eh_host_reset,
139 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
141 .slave_configure = qla4xxx_slave_configure,
142 .slave_alloc = qla4xxx_slave_alloc,
143 .slave_destroy = qla4xxx_slave_destroy,
145 .this_id = -1,
146 .cmd_per_lun = 3,
147 .use_clustering = ENABLE_CLUSTERING,
148 .sg_tablesize = SG_ALL,
150 .max_sectors = 0xFFFF,
151 .shost_attrs = qla4xxx_host_attrs,
152 .host_reset = qla4xxx_host_reset,
153 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
156 static struct iscsi_transport qla4xxx_iscsi_transport = {
157 .owner = THIS_MODULE,
158 .name = DRIVER_NAME,
159 .caps = CAP_TEXT_NEGO |
160 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
161 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
162 CAP_MULTI_R2T,
163 .attr_is_visible = ql4_attr_is_visible,
164 .create_session = qla4xxx_session_create,
165 .destroy_session = qla4xxx_session_destroy,
166 .start_conn = qla4xxx_conn_start,
167 .create_conn = qla4xxx_conn_create,
168 .bind_conn = qla4xxx_conn_bind,
169 .stop_conn = iscsi_conn_stop,
170 .destroy_conn = qla4xxx_conn_destroy,
171 .set_param = iscsi_set_param,
172 .get_conn_param = qla4xxx_conn_get_param,
173 .get_session_param = iscsi_session_get_param,
174 .get_ep_param = qla4xxx_get_ep_param,
175 .ep_connect = qla4xxx_ep_connect,
176 .ep_poll = qla4xxx_ep_poll,
177 .ep_disconnect = qla4xxx_ep_disconnect,
178 .get_stats = qla4xxx_conn_get_stats,
179 .send_pdu = iscsi_conn_send_pdu,
180 .xmit_task = qla4xxx_task_xmit,
181 .cleanup_task = qla4xxx_task_cleanup,
182 .alloc_pdu = qla4xxx_alloc_pdu,
184 .get_host_param = qla4xxx_host_get_param,
185 .set_iface_param = qla4xxx_iface_set_param,
186 .get_iface_param = qla4xxx_get_iface_param,
187 .bsg_request = qla4xxx_bsg_request,
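/*
 * Note: this transport template is presumably registered with the iSCSI
 * transport class (iscsi_register_transport()) during module init, which
 * lies outside this excerpt.
 */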
190 static struct scsi_transport_template *qla4xxx_scsi_transport;
192 static mode_t ql4_attr_is_visible(int param_type, int param)
194 switch (param_type) {
195 case ISCSI_HOST_PARAM:
196 switch (param) {
197 case ISCSI_HOST_PARAM_HWADDRESS:
198 case ISCSI_HOST_PARAM_IPADDRESS:
199 case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 return S_IRUGO;
201 default:
202 return 0;
204 case ISCSI_PARAM:
205 switch (param) {
206 case ISCSI_PARAM_CONN_ADDRESS:
207 case ISCSI_PARAM_CONN_PORT:
208 case ISCSI_PARAM_TARGET_NAME:
209 case ISCSI_PARAM_TPGT:
210 case ISCSI_PARAM_TARGET_ALIAS:
211 case ISCSI_PARAM_MAX_BURST:
212 case ISCSI_PARAM_MAX_R2T:
213 case ISCSI_PARAM_FIRST_BURST:
214 case ISCSI_PARAM_MAX_RECV_DLENGTH:
215 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
216 case ISCSI_PARAM_IFACE_NAME:
217 return S_IRUGO;
218 default:
219 return 0;
221 case ISCSI_NET_PARAM:
222 switch (param) {
223 case ISCSI_NET_PARAM_IPV4_ADDR:
224 case ISCSI_NET_PARAM_IPV4_SUBNET:
225 case ISCSI_NET_PARAM_IPV4_GW:
226 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
227 case ISCSI_NET_PARAM_IFACE_ENABLE:
228 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
229 case ISCSI_NET_PARAM_IPV6_ADDR:
230 case ISCSI_NET_PARAM_IPV6_ROUTER:
231 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
232 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
233 case ISCSI_NET_PARAM_VLAN_ID:
234 case ISCSI_NET_PARAM_VLAN_PRIORITY:
235 case ISCSI_NET_PARAM_VLAN_ENABLED:
236 case ISCSI_NET_PARAM_MTU:
237 case ISCSI_NET_PARAM_PORT:
238 return S_IRUGO;
239 default:
240 return 0;
244 return 0;
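/*
 * ql4_attr_is_visible() is consulted by the iSCSI transport class when it
 * creates sysfs attributes: returning S_IRUGO exposes a parameter read-only
 * for this driver, while returning 0 hides it.
 */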
247 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
248 enum iscsi_param_type param_type,
249 int param, char *buf)
251 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
252 struct scsi_qla_host *ha = to_qla_host(shost);
253 int len = -ENOSYS;
255 if (param_type != ISCSI_NET_PARAM)
256 return -ENOSYS;
258 switch (param) {
259 case ISCSI_NET_PARAM_IPV4_ADDR:
260 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
261 break;
262 case ISCSI_NET_PARAM_IPV4_SUBNET:
263 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
264 break;
265 case ISCSI_NET_PARAM_IPV4_GW:
266 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
267 break;
268 case ISCSI_NET_PARAM_IFACE_ENABLE:
269 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
270 len = sprintf(buf, "%s\n",
271 (ha->ip_config.ipv4_options &
272 IPOPT_IPV4_PROTOCOL_ENABLE) ?
273 "enabled" : "disabled");
274 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
275 len = sprintf(buf, "%s\n",
276 (ha->ip_config.ipv6_options &
277 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
278 "enabled" : "disabled");
279 break;
280 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
281 len = sprintf(buf, "%s\n",
282 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
283 "dhcp" : "static");
284 break;
285 case ISCSI_NET_PARAM_IPV6_ADDR:
286 if (iface->iface_num == 0)
287 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
288 if (iface->iface_num == 1)
289 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
290 break;
291 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
292 len = sprintf(buf, "%pI6\n",
293 &ha->ip_config.ipv6_link_local_addr);
294 break;
295 case ISCSI_NET_PARAM_IPV6_ROUTER:
296 len = sprintf(buf, "%pI6\n",
297 &ha->ip_config.ipv6_default_router_addr);
298 break;
299 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
300 len = sprintf(buf, "%s\n",
301 (ha->ip_config.ipv6_addl_options &
302 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
303 "nd" : "static");
304 break;
305 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
306 len = sprintf(buf, "%s\n",
307 (ha->ip_config.ipv6_addl_options &
308 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
309 "auto" : "static");
310 break;
311 case ISCSI_NET_PARAM_VLAN_ID:
312 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
313 len = sprintf(buf, "%d\n",
314 (ha->ip_config.ipv4_vlan_tag &
315 ISCSI_MAX_VLAN_ID));
316 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
317 len = sprintf(buf, "%d\n",
318 (ha->ip_config.ipv6_vlan_tag &
319 ISCSI_MAX_VLAN_ID));
320 break;
321 case ISCSI_NET_PARAM_VLAN_PRIORITY:
322 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
323 len = sprintf(buf, "%d\n",
324 ((ha->ip_config.ipv4_vlan_tag >> 13) &
325 ISCSI_MAX_VLAN_PRIORITY));
326 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
327 len = sprintf(buf, "%d\n",
328 ((ha->ip_config.ipv6_vlan_tag >> 13) &
329 ISCSI_MAX_VLAN_PRIORITY));
330 break;
331 case ISCSI_NET_PARAM_VLAN_ENABLED:
332 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
333 len = sprintf(buf, "%s\n",
334 (ha->ip_config.ipv4_options &
335 IPOPT_VLAN_TAGGING_ENABLE) ?
336 "enabled" : "disabled");
337 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
338 len = sprintf(buf, "%s\n",
339 (ha->ip_config.ipv6_options &
340 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
341 "enabled" : "disabled");
342 break;
343 case ISCSI_NET_PARAM_MTU:
344 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
345 break;
346 case ISCSI_NET_PARAM_PORT:
347 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
348 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
349 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
350 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
351 break;
352 default:
353 len = -ENOSYS;
356 return len;
359 static struct iscsi_endpoint *
360 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
361 int non_blocking)
363 int ret;
364 struct iscsi_endpoint *ep;
365 struct qla_endpoint *qla_ep;
366 struct scsi_qla_host *ha;
367 struct sockaddr_in *addr;
368 struct sockaddr_in6 *addr6;
370 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
371 if (!shost) {
372 ret = -ENXIO;
373 printk(KERN_ERR "%s: shost is NULL\n",
374 __func__);
375 return ERR_PTR(ret);
378 ha = iscsi_host_priv(shost);
380 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
381 if (!ep) {
382 ret = -ENOMEM;
383 return ERR_PTR(ret);
386 qla_ep = ep->dd_data;
387 memset(qla_ep, 0, sizeof(struct qla_endpoint));
388 if (dst_addr->sa_family == AF_INET) {
389 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
390 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
391 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
392 (char *)&addr->sin_addr));
393 } else if (dst_addr->sa_family == AF_INET6) {
394 memcpy(&qla_ep->dst_addr, dst_addr,
395 sizeof(struct sockaddr_in6));
396 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
397 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
398 (char *)&addr6->sin6_addr));
401 qla_ep->host = shost;
403 return ep;
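/*
 * Note: qla4xxx is a full-offload HBA, so ep_connect above only records the
 * destination address and owning host in the endpoint; the firmware appears
 * to establish the actual TCP connection later, when the connection is
 * started (see qla4xxx_conn_start()).
 */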
406 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
408 struct qla_endpoint *qla_ep;
409 struct scsi_qla_host *ha;
410 int ret = 0;
412 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
413 qla_ep = ep->dd_data;
414 ha = to_qla_host(qla_ep->host);
416 if (adapter_up(ha))
417 ret = 1;
419 return ret;
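/*
 * ep_poll only reports whether the adapter itself is up; per-endpoint TCP
 * state is not tracked here since the connection is managed by firmware.
 */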
422 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
424 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
425 iscsi_destroy_endpoint(ep);
428 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
429 enum iscsi_param param,
430 char *buf)
432 struct qla_endpoint *qla_ep = ep->dd_data;
433 struct sockaddr *dst_addr;
435 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
437 switch (param) {
438 case ISCSI_PARAM_CONN_PORT:
439 case ISCSI_PARAM_CONN_ADDRESS:
440 if (!qla_ep)
441 return -ENOTCONN;
443 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
444 if (!dst_addr)
445 return -ENOTCONN;
447 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
448 &qla_ep->dst_addr, param, buf);
449 default:
450 return -ENOSYS;
454 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
455 struct iscsi_stats *stats)
457 struct iscsi_session *sess;
458 struct iscsi_cls_session *cls_sess;
459 struct ddb_entry *ddb_entry;
460 struct scsi_qla_host *ha;
461 struct ql_iscsi_stats *ql_iscsi_stats;
462 int stats_size;
463 int ret;
464 dma_addr_t iscsi_stats_dma;
466 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
468 cls_sess = iscsi_conn_to_session(cls_conn);
469 sess = cls_sess->dd_data;
470 ddb_entry = sess->dd_data;
471 ha = ddb_entry->ha;
473 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
474 /* Allocate memory */
475 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
476 &iscsi_stats_dma, GFP_KERNEL);
477 if (!ql_iscsi_stats) {
478 ql4_printk(KERN_ERR, ha,
479 "Unable to allocate memory for iscsi stats\n");
480 goto exit_get_stats;
483 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
484 iscsi_stats_dma);
485 if (ret != QLA_SUCCESS) {
486 ql4_printk(KERN_ERR, ha,
487 "Unable to retreive iscsi stats\n");
488 goto free_stats;
491 /* octets */
492 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
493 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
494 /* xmit pdus */
495 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
496 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
497 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
498 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
499 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
500 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
501 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
502 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
503 /* recv pdus */
504 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
505 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
506 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
507 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
508 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
509 stats->logoutrsp_pdus =
510 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
511 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
512 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
513 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
515 free_stats:
516 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
517 iscsi_stats_dma);
518 exit_get_stats:
519 return;
522 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
524 struct iscsi_cls_session *session;
525 struct iscsi_session *sess;
526 unsigned long flags;
527 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
529 session = starget_to_session(scsi_target(sc->device));
530 sess = session->dd_data;
532 spin_lock_irqsave(&session->lock, flags);
533 if (session->state == ISCSI_SESSION_FAILED)
534 ret = BLK_EH_RESET_TIMER;
535 spin_unlock_irqrestore(&session->lock, flags);
537 return ret;
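/*
 * Returning BLK_EH_RESET_TIMER while the session is in ISCSI_SESSION_FAILED
 * keeps restarting the command timer, so SCSI error handling is deferred
 * until the session either recovers or is torn down by the transport class.
 */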
540 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
541 enum iscsi_host_param param, char *buf)
543 struct scsi_qla_host *ha = to_qla_host(shost);
544 int len;
546 switch (param) {
547 case ISCSI_HOST_PARAM_HWADDRESS:
548 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
549 break;
550 case ISCSI_HOST_PARAM_IPADDRESS:
551 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
552 break;
553 case ISCSI_HOST_PARAM_INITIATOR_NAME:
554 len = sprintf(buf, "%s\n", ha->name_string);
555 break;
556 default:
557 return -ENOSYS;
560 return len;
563 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
565 if (ha->iface_ipv4)
566 return;
568 /* IPv4 */
569 ha->iface_ipv4 = iscsi_create_iface(ha->host,
570 &qla4xxx_iscsi_transport,
571 ISCSI_IFACE_TYPE_IPV4, 0, 0);
572 if (!ha->iface_ipv4)
573 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
574 "iface0.\n");
577 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
579 if (!ha->iface_ipv6_0)
580 /* IPv6 iface-0 */
581 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
582 &qla4xxx_iscsi_transport,
583 ISCSI_IFACE_TYPE_IPV6, 0,
585 if (!ha->iface_ipv6_0)
586 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 "iface0.\n");
589 if (!ha->iface_ipv6_1)
590 /* IPv6 iface-1 */
591 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
592 &qla4xxx_iscsi_transport,
593 ISCSI_IFACE_TYPE_IPV6, 1,
595 if (!ha->iface_ipv6_1)
596 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
597 "iface1.\n");
600 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
602 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
603 qla4xxx_create_ipv4_iface(ha);
605 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
606 qla4xxx_create_ipv6_iface(ha);
609 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
611 if (ha->iface_ipv4) {
612 iscsi_destroy_iface(ha->iface_ipv4);
613 ha->iface_ipv4 = NULL;
617 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
619 if (ha->iface_ipv6_0) {
620 iscsi_destroy_iface(ha->iface_ipv6_0);
621 ha->iface_ipv6_0 = NULL;
623 if (ha->iface_ipv6_1) {
624 iscsi_destroy_iface(ha->iface_ipv6_1);
625 ha->iface_ipv6_1 = NULL;
629 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
631 qla4xxx_destroy_ipv4_iface(ha);
632 qla4xxx_destroy_ipv6_iface(ha);
635 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
636 struct iscsi_iface_param_info *iface_param,
637 struct addr_ctrl_blk *init_fw_cb)
639 /*
640 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
641 * iface_num 1 is valid only for IPv6 Addr.
642 */
643 switch (iface_param->param) {
644 case ISCSI_NET_PARAM_IPV6_ADDR:
645 if (iface_param->iface_num & 0x1)
646 /* IPv6 Addr 1 */
647 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
648 sizeof(init_fw_cb->ipv6_addr1));
649 else
650 /* IPv6 Addr 0 */
651 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
652 sizeof(init_fw_cb->ipv6_addr0));
653 break;
654 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
655 if (iface_param->iface_num & 0x1)
656 break;
657 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
658 sizeof(init_fw_cb->ipv6_if_id));
659 break;
660 case ISCSI_NET_PARAM_IPV6_ROUTER:
661 if (iface_param->iface_num & 0x1)
662 break;
663 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
664 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
665 break;
666 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
667 /* Autocfg applies to even interface */
668 if (iface_param->iface_num & 0x1)
669 break;
671 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
672 init_fw_cb->ipv6_addtl_opts &=
673 cpu_to_le16(
674 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
675 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
676 init_fw_cb->ipv6_addtl_opts |=
677 cpu_to_le16(
678 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
679 else
680 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
681 "IPv6 addr\n");
682 break;
683 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
684 /* Autocfg applies to even interface */
685 if (iface_param->iface_num & 0x1)
686 break;
688 if (iface_param->value[0] ==
689 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
690 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
691 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
692 else if (iface_param->value[0] ==
693 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
694 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
695 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
696 else
697 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
698 "IPv6 linklocal addr\n");
699 break;
700 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
701 /* Autocfg applies to even interface */
702 if (iface_param->iface_num & 0x1)
703 break;
705 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
706 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
707 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
708 break;
709 case ISCSI_NET_PARAM_IFACE_ENABLE:
710 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
711 init_fw_cb->ipv6_opts |=
712 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
713 qla4xxx_create_ipv6_iface(ha);
714 } else {
715 init_fw_cb->ipv6_opts &=
716 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
717 0xFFFF);
718 qla4xxx_destroy_ipv6_iface(ha);
720 break;
721 case ISCSI_NET_PARAM_VLAN_ID:
722 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
723 break;
724 init_fw_cb->ipv6_vlan_tag =
725 cpu_to_be16(*(uint16_t *)iface_param->value);
726 break;
727 case ISCSI_NET_PARAM_VLAN_ENABLED:
728 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
729 init_fw_cb->ipv6_opts |=
730 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
731 else
732 init_fw_cb->ipv6_opts &=
733 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
734 break;
735 case ISCSI_NET_PARAM_MTU:
736 init_fw_cb->eth_mtu_size =
737 cpu_to_le16(*(uint16_t *)iface_param->value);
738 break;
739 case ISCSI_NET_PARAM_PORT:
740 /* Autocfg applies to even interface */
741 if (iface_param->iface_num & 0x1)
742 break;
744 init_fw_cb->ipv6_port =
745 cpu_to_le16(*(uint16_t *)iface_param->value);
746 break;
747 default:
748 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
749 iface_param->param);
750 break;
754 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
755 struct iscsi_iface_param_info *iface_param,
756 struct addr_ctrl_blk *init_fw_cb)
758 switch (iface_param->param) {
759 case ISCSI_NET_PARAM_IPV4_ADDR:
760 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
761 sizeof(init_fw_cb->ipv4_addr));
762 break;
763 case ISCSI_NET_PARAM_IPV4_SUBNET:
764 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
765 sizeof(init_fw_cb->ipv4_subnet));
766 break;
767 case ISCSI_NET_PARAM_IPV4_GW:
768 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
769 sizeof(init_fw_cb->ipv4_gw_addr));
770 break;
771 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
772 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
773 init_fw_cb->ipv4_tcp_opts |=
774 cpu_to_le16(TCPOPT_DHCP_ENABLE);
775 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
776 init_fw_cb->ipv4_tcp_opts &=
777 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
778 else
779 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
780 break;
781 case ISCSI_NET_PARAM_IFACE_ENABLE:
782 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
783 init_fw_cb->ipv4_ip_opts |=
784 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
785 qla4xxx_create_ipv4_iface(ha);
786 } else {
787 init_fw_cb->ipv4_ip_opts &=
788 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
789 0xFFFF);
790 qla4xxx_destroy_ipv4_iface(ha);
792 break;
793 case ISCSI_NET_PARAM_VLAN_ID:
794 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
795 break;
796 init_fw_cb->ipv4_vlan_tag =
797 cpu_to_be16(*(uint16_t *)iface_param->value);
798 break;
799 case ISCSI_NET_PARAM_VLAN_ENABLED:
800 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
801 init_fw_cb->ipv4_ip_opts |=
802 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
803 else
804 init_fw_cb->ipv4_ip_opts &=
805 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
806 break;
807 case ISCSI_NET_PARAM_MTU:
808 init_fw_cb->eth_mtu_size =
809 cpu_to_le16(*(uint16_t *)iface_param->value);
810 break;
811 case ISCSI_NET_PARAM_PORT:
812 init_fw_cb->ipv4_port =
813 cpu_to_le16(*(uint16_t *)iface_param->value);
814 break;
815 default:
816 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
817 iface_param->param);
818 break;
822 static void
823 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
825 struct addr_ctrl_blk_def *acb;
826 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
827 memset(acb->reserved1, 0, sizeof(acb->reserved1));
828 memset(acb->reserved2, 0, sizeof(acb->reserved2));
829 memset(acb->reserved3, 0, sizeof(acb->reserved3));
830 memset(acb->reserved4, 0, sizeof(acb->reserved4));
831 memset(acb->reserved5, 0, sizeof(acb->reserved5));
832 memset(acb->reserved6, 0, sizeof(acb->reserved6));
833 memset(acb->reserved7, 0, sizeof(acb->reserved7));
834 memset(acb->reserved8, 0, sizeof(acb->reserved8));
835 memset(acb->reserved9, 0, sizeof(acb->reserved9));
836 memset(acb->reserved10, 0, sizeof(acb->reserved10));
837 memset(acb->reserved11, 0, sizeof(acb->reserved11));
838 memset(acb->reserved12, 0, sizeof(acb->reserved12));
839 memset(acb->reserved13, 0, sizeof(acb->reserved13));
840 memset(acb->reserved14, 0, sizeof(acb->reserved14));
841 memset(acb->reserved15, 0, sizeof(acb->reserved15));
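/*
 * Rough flow of qla4xxx_iface_set_param() below: read the current init
 * firmware control block (IFCB) from the adapter, fold in each
 * iscsi_iface_param_info record passed from user space, write the result
 * back to flash, disable the current address control block (ACB), program
 * the new ACB, and finally refresh the driver's cached copy of the IFCB.
 */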
844 static int
845 qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
847 struct scsi_qla_host *ha = to_qla_host(shost);
848 int rval = 0;
849 struct iscsi_iface_param_info *iface_param = NULL;
850 struct addr_ctrl_blk *init_fw_cb = NULL;
851 dma_addr_t init_fw_cb_dma;
852 uint32_t mbox_cmd[MBOX_REG_COUNT];
853 uint32_t mbox_sts[MBOX_REG_COUNT];
854 uint32_t total_param_count;
855 uint32_t length;
857 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
858 sizeof(struct addr_ctrl_blk),
859 &init_fw_cb_dma, GFP_KERNEL);
860 if (!init_fw_cb) {
861 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
862 __func__);
863 return -ENOMEM;
866 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
867 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
868 memset(&mbox_sts, 0, sizeof(mbox_sts));
870 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
871 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
872 rval = -EIO;
873 goto exit_init_fw_cb;
876 total_param_count = count;
877 iface_param = (struct iscsi_iface_param_info *)data;
879 for ( ; total_param_count != 0; total_param_count--) {
880 length = iface_param->len;
882 if (iface_param->param_type != ISCSI_NET_PARAM)
883 continue;
885 switch (iface_param->iface_type) {
886 case ISCSI_IFACE_TYPE_IPV4:
887 switch (iface_param->iface_num) {
888 case 0:
889 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
890 break;
891 default:
892 /* Cannot have more than one IPv4 interface */
893 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
894 "number = %d\n",
895 iface_param->iface_num);
896 break;
898 break;
899 case ISCSI_IFACE_TYPE_IPV6:
900 switch (iface_param->iface_num) {
901 case 0:
902 case 1:
903 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
904 break;
905 default:
906 /* Cannot have more than two IPv6 interfaces */
907 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
908 "number = %d\n",
909 iface_param->iface_num);
910 break;
912 break;
913 default:
914 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
915 break;
918 iface_param = (struct iscsi_iface_param_info *)
919 ((uint8_t *)iface_param +
920 sizeof(struct iscsi_iface_param_info) + length);
923 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
925 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
926 sizeof(struct addr_ctrl_blk),
927 FLASH_OPT_RMW_COMMIT);
928 if (rval != QLA_SUCCESS) {
929 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
930 __func__);
931 rval = -EIO;
932 goto exit_init_fw_cb;
935 qla4xxx_disable_acb(ha);
937 qla4xxx_initcb_to_acb(init_fw_cb);
939 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
940 if (rval != QLA_SUCCESS) {
941 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
942 __func__);
943 rval = -EIO;
944 goto exit_init_fw_cb;
947 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
948 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
949 init_fw_cb_dma);
951 exit_init_fw_cb:
952 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
953 init_fw_cb, init_fw_cb_dma);
955 return rval;
958 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
959 enum iscsi_param param, char *buf)
961 struct iscsi_conn *conn;
962 struct qla_conn *qla_conn;
963 struct sockaddr *dst_addr;
964 int len = 0;
966 conn = cls_conn->dd_data;
967 qla_conn = conn->dd_data;
968 dst_addr = &qla_conn->qla_ep->dst_addr;
970 switch (param) {
971 case ISCSI_PARAM_CONN_PORT:
972 case ISCSI_PARAM_CONN_ADDRESS:
973 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
974 dst_addr, param, buf);
975 default:
976 return iscsi_conn_get_param(cls_conn, param, buf);
979 return len;
983 static struct iscsi_cls_session *
984 qla4xxx_session_create(struct iscsi_endpoint *ep,
985 uint16_t cmds_max, uint16_t qdepth,
986 uint32_t initial_cmdsn)
988 struct iscsi_cls_session *cls_sess;
989 struct scsi_qla_host *ha;
990 struct qla_endpoint *qla_ep;
991 struct ddb_entry *ddb_entry;
992 uint32_t ddb_index;
993 uint32_t mbx_sts = 0;
994 struct iscsi_session *sess;
995 struct sockaddr *dst_addr;
996 int ret;
998 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
999 if (!ep) {
1000 printk(KERN_ERR "qla4xxx: missing ep.\n");
1001 return NULL;
1004 qla_ep = ep->dd_data;
1005 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1006 ha = to_qla_host(qla_ep->host);
1007 get_ddb_index:
1008 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1010 if (ddb_index >= MAX_DDB_ENTRIES) {
1011 DEBUG2(ql4_printk(KERN_INFO, ha,
1012 "Free DDB index not available\n"));
1013 return NULL;
1016 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1017 goto get_ddb_index;
1019 DEBUG2(ql4_printk(KERN_INFO, ha,
1020 "Found a free DDB index at %d\n", ddb_index));
1021 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1022 if (ret == QLA_ERROR) {
1023 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1024 ql4_printk(KERN_INFO, ha,
1025 "DDB index = %d not available trying next\n",
1026 ddb_index);
1027 goto get_ddb_index;
1029 DEBUG2(ql4_printk(KERN_INFO, ha,
1030 "Free FW DDB not available\n"));
1031 return NULL;
1034 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1035 cmds_max, sizeof(struct ddb_entry),
1036 sizeof(struct ql4_task_data),
1037 initial_cmdsn, ddb_index);
1038 if (!cls_sess)
1039 return NULL;
1041 sess = cls_sess->dd_data;
1042 ddb_entry = sess->dd_data;
1043 ddb_entry->fw_ddb_index = ddb_index;
1044 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1045 ddb_entry->ha = ha;
1046 ddb_entry->sess = cls_sess;
1047 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1048 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1049 ha->tot_ddbs++;
1051 return cls_sess;
1054 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1056 struct iscsi_session *sess;
1057 struct ddb_entry *ddb_entry;
1058 struct scsi_qla_host *ha;
1059 unsigned long flags;
1061 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1062 sess = cls_sess->dd_data;
1063 ddb_entry = sess->dd_data;
1064 ha = ddb_entry->ha;
1066 spin_lock_irqsave(&ha->hardware_lock, flags);
1067 qla4xxx_free_ddb(ha, ddb_entry);
1068 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1069 iscsi_session_teardown(cls_sess);
1072 static struct iscsi_cls_conn *
1073 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1075 struct iscsi_cls_conn *cls_conn;
1076 struct iscsi_session *sess;
1077 struct ddb_entry *ddb_entry;
1079 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1080 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1081 conn_idx);
1082 sess = cls_sess->dd_data;
1083 ddb_entry = sess->dd_data;
1084 ddb_entry->conn = cls_conn;
1086 return cls_conn;
1089 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1090 struct iscsi_cls_conn *cls_conn,
1091 uint64_t transport_fd, int is_leading)
1093 struct iscsi_conn *conn;
1094 struct qla_conn *qla_conn;
1095 struct iscsi_endpoint *ep;
1097 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1099 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1100 return -EINVAL;
1101 ep = iscsi_lookup_endpoint(transport_fd);
1102 conn = cls_conn->dd_data;
1103 qla_conn = conn->dd_data;
1104 qla_conn->qla_ep = ep->dd_data;
1105 return 0;
1108 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1110 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1111 struct iscsi_session *sess;
1112 struct ddb_entry *ddb_entry;
1113 struct scsi_qla_host *ha;
1114 struct dev_db_entry *fw_ddb_entry;
1115 dma_addr_t fw_ddb_entry_dma;
1116 uint32_t mbx_sts = 0;
1117 int ret = 0;
1118 int status = QLA_SUCCESS;
1120 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1121 sess = cls_sess->dd_data;
1122 ddb_entry = sess->dd_data;
1123 ha = ddb_entry->ha;
1125 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1126 &fw_ddb_entry_dma, GFP_KERNEL);
1127 if (!fw_ddb_entry) {
1128 ql4_printk(KERN_ERR, ha,
1129 "%s: Unable to allocate dma buffer\n", __func__);
1130 return -ENOMEM;
1133 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1134 if (ret) {
1135 /* If iscsid is stopped and started then no need to do
1136 * set param again since ddb state will be already
1137 * active and FW does not allow set ddb to an
1138 * active session.
1139 */
1140 if (mbx_sts)
1141 if (ddb_entry->fw_ddb_device_state ==
1142 DDB_DS_SESSION_ACTIVE)
1143 goto exit_set_param;
1145 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1146 __func__, ddb_entry->fw_ddb_index);
1147 goto exit_conn_start;
1150 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1151 if (status == QLA_ERROR) {
1152 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1153 sess->targetname);
1154 ret = -EINVAL;
1155 goto exit_conn_start;
1158 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1160 exit_set_param:
1161 iscsi_conn_start(cls_conn);
1162 ret = 0;
1164 exit_conn_start:
1165 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1166 fw_ddb_entry, fw_ddb_entry_dma);
1167 return ret;
1170 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1172 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1173 struct iscsi_session *sess;
1174 struct scsi_qla_host *ha;
1175 struct ddb_entry *ddb_entry;
1176 int options;
1178 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1179 sess = cls_sess->dd_data;
1180 ddb_entry = sess->dd_data;
1181 ha = ddb_entry->ha;
1183 options = LOGOUT_OPTION_CLOSE_SESSION;
1184 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1185 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1186 else
1187 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1189 /*
1190 * Clear the DDB bit so that next login can use the bit
1191 * if FW is not clearing the DDB entry then set DDB will fail anyway
1192 */
1193 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
1196 static void qla4xxx_task_work(struct work_struct *wdata)
1198 struct ql4_task_data *task_data;
1199 struct scsi_qla_host *ha;
1200 struct passthru_status *sts;
1201 struct iscsi_task *task;
1202 struct iscsi_hdr *hdr;
1203 uint8_t *data;
1204 uint32_t data_len;
1205 struct iscsi_conn *conn;
1206 int hdr_len;
1207 itt_t itt;
1209 task_data = container_of(wdata, struct ql4_task_data, task_work);
1210 ha = task_data->ha;
1211 task = task_data->task;
1212 sts = &task_data->sts;
1213 hdr_len = sizeof(struct iscsi_hdr);
1215 DEBUG3(printk(KERN_INFO "Status returned\n"));
1216 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1217 DEBUG3(printk(KERN_INFO "Response buffer"));
1218 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1220 conn = task->conn;
1222 switch (sts->completionStatus) {
1223 case PASSTHRU_STATUS_COMPLETE:
1224 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1225 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1226 itt = sts->handle;
1227 hdr->itt = itt;
1228 data = task_data->resp_buffer + hdr_len;
1229 data_len = task_data->resp_len - hdr_len;
1230 iscsi_complete_pdu(conn, hdr, data, data_len);
1231 break;
1232 default:
1233 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1234 sts->completionStatus);
1235 break;
1237 return;
1240 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1242 struct ql4_task_data *task_data;
1243 struct iscsi_session *sess;
1244 struct ddb_entry *ddb_entry;
1245 struct scsi_qla_host *ha;
1246 int hdr_len;
1248 sess = task->conn->session;
1249 ddb_entry = sess->dd_data;
1250 ha = ddb_entry->ha;
1251 task_data = task->dd_data;
1252 memset(task_data, 0, sizeof(struct ql4_task_data));
1254 if (task->sc) {
1255 ql4_printk(KERN_INFO, ha,
1256 "%s: SCSI Commands not implemented\n", __func__);
1257 return -EINVAL;
1260 hdr_len = sizeof(struct iscsi_hdr);
1261 task_data->ha = ha;
1262 task_data->task = task;
1264 if (task->data_count) {
1265 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1266 task->data_count,
1267 PCI_DMA_TODEVICE);
1270 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1271 __func__, task->conn->max_recv_dlength, hdr_len));
1273 task_data->resp_len = task->conn->max_recv_dlength;
1274 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1275 task_data->resp_len,
1276 &task_data->resp_dma,
1277 GFP_ATOMIC);
1278 if (!task_data->resp_buffer)
1279 goto exit_alloc_pdu;
1281 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1282 task->data_count + hdr_len,
1283 &task_data->req_dma,
1284 GFP_ATOMIC);
1285 if (!task_data->req_buffer)
1286 goto exit_alloc_pdu;
1288 task->hdr = task_data->req_buffer;
1290 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1292 return 0;
1294 exit_alloc_pdu:
1295 if (task_data->resp_buffer)
1296 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1297 task_data->resp_buffer, task_data->resp_dma);
1299 if (task_data->req_buffer)
1300 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1301 task_data->req_buffer, task_data->req_dma);
1302 return -ENOMEM;
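/*
 * The request/response buffers above are DMA-coherent because the firmware
 * builds and parses the pass-through PDUs directly; GFP_ATOMIC is used since
 * ->alloc_pdu() may be invoked with locks held by libiscsi.
 */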
1305 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1307 struct ql4_task_data *task_data;
1308 struct iscsi_session *sess;
1309 struct ddb_entry *ddb_entry;
1310 struct scsi_qla_host *ha;
1311 int hdr_len;
1313 hdr_len = sizeof(struct iscsi_hdr);
1314 sess = task->conn->session;
1315 ddb_entry = sess->dd_data;
1316 ha = ddb_entry->ha;
1317 task_data = task->dd_data;
1319 if (task->data_count) {
1320 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1321 task->data_count, PCI_DMA_TODEVICE);
1324 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1325 __func__, task->conn->max_recv_dlength, hdr_len));
1327 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1328 task_data->resp_buffer, task_data->resp_dma);
1329 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1330 task_data->req_buffer, task_data->req_dma);
1331 return;
1334 static int qla4xxx_task_xmit(struct iscsi_task *task)
1336 struct scsi_cmnd *sc = task->sc;
1337 struct iscsi_session *sess = task->conn->session;
1338 struct ddb_entry *ddb_entry = sess->dd_data;
1339 struct scsi_qla_host *ha = ddb_entry->ha;
1341 if (!sc)
1342 return qla4xxx_send_passthru0(task);
1344 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1345 __func__);
1346 return -ENOSYS;
1349 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1350 struct ddb_entry *ddb_entry)
1352 struct iscsi_cls_session *cls_sess;
1353 struct iscsi_cls_conn *cls_conn;
1354 struct iscsi_session *sess;
1355 struct iscsi_conn *conn;
1356 uint32_t ddb_state;
1357 dma_addr_t fw_ddb_entry_dma;
1358 struct dev_db_entry *fw_ddb_entry;
1360 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1361 &fw_ddb_entry_dma, GFP_KERNEL);
1362 if (!fw_ddb_entry) {
1363 ql4_printk(KERN_ERR, ha,
1364 "%s: Unable to allocate dma buffer\n", __func__);
1365 return;
1368 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1369 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1370 NULL, NULL, NULL) == QLA_ERROR) {
1371 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1372 "get_ddb_entry for fw_ddb_index %d\n",
1373 ha->host_no, __func__,
1374 ddb_entry->fw_ddb_index));
1375 return;
1378 cls_sess = ddb_entry->sess;
1379 sess = cls_sess->dd_data;
1381 cls_conn = ddb_entry->conn;
1382 conn = cls_conn->dd_data;
1384 /* Update params */
1385 conn->max_recv_dlength = BYTE_UNITS *
1386 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1388 conn->max_xmit_dlength = BYTE_UNITS *
1389 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1391 sess->initial_r2t_en =
1392 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1394 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1396 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1398 sess->first_burst = BYTE_UNITS *
1399 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1401 sess->max_burst = BYTE_UNITS *
1402 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1404 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1406 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1408 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1410 memcpy(sess->initiatorname, ha->name_string,
1411 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1414 /*
1415 * Timer routines
1416 */
1418 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1419 unsigned long interval)
1421 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1422 __func__, ha->host->host_no));
1423 init_timer(&ha->timer);
1424 ha->timer.expires = jiffies + interval * HZ;
1425 ha->timer.data = (unsigned long)ha;
1426 ha->timer.function = (void (*)(unsigned long))func;
1427 add_timer(&ha->timer);
1428 ha->timer_active = 1;
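/* The interval argument is in seconds; it is scaled by HZ above. */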
1431 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1433 del_timer_sync(&ha->timer);
1434 ha->timer_active = 0;
1437 /**
1438 * qla4xxx_mark_device_missing - blocks the session
1439 * @cls_session: Pointer to the session to be blocked
1440 * @ddb_entry: Pointer to device database entry
1441 *
1442 * This routine marks a device missing and closes the connection.
1443 **/
1444 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
1446 iscsi_block_session(cls_session);
1449 /**
1450 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1451 * @ha: Pointer to host adapter structure.
1452 *
1453 * This routine marks a device missing and resets the relogin retry count.
1454 **/
1455 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1457 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1460 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1461 struct ddb_entry *ddb_entry,
1462 struct scsi_cmnd *cmd)
1464 struct srb *srb;
1466 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1467 if (!srb)
1468 return srb;
1470 kref_init(&srb->srb_ref);
1471 srb->ha = ha;
1472 srb->ddb = ddb_entry;
1473 srb->cmd = cmd;
1474 srb->flags = 0;
1475 CMD_SP(cmd) = (void *)srb;
1477 return srb;
1480 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1482 struct scsi_cmnd *cmd = srb->cmd;
1484 if (srb->flags & SRB_DMA_VALID) {
1485 scsi_dma_unmap(cmd);
1486 srb->flags &= ~SRB_DMA_VALID;
1488 CMD_SP(cmd) = NULL;
1491 void qla4xxx_srb_compl(struct kref *ref)
1493 struct srb *srb = container_of(ref, struct srb, srb_ref);
1494 struct scsi_cmnd *cmd = srb->cmd;
1495 struct scsi_qla_host *ha = srb->ha;
1497 qla4xxx_srb_free_dma(ha, srb);
1499 mempool_free(srb, ha->srb_mempool);
1501 cmd->scsi_done(cmd);
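/*
 * qla4xxx_srb_compl() is the kref release function for an SRB, so the DMA
 * unmap, mempool free and midlayer completion run exactly once, when the
 * last reference is dropped via kref_put().
 */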
1504 /**
1505 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
1506 * @host: scsi host
1507 * @cmd: Pointer to Linux's SCSI command structure
1508 *
1509 * Remarks:
1510 * This routine is invoked by Linux to send a SCSI command to the driver.
1511 * The mid-level driver tries to ensure that queuecommand never gets
1512 * invoked concurrently with itself or the interrupt handler (although
1513 * the interrupt handler may call this routine as part of request-
1514 * completion handling). Unfortunately, it sometimes calls the scheduler
1515 * in interrupt context, which is a big NO! NO!.
1516 **/
1517 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1519 struct scsi_qla_host *ha = to_qla_host(host);
1520 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1521 struct iscsi_cls_session *sess = ddb_entry->sess;
1522 struct srb *srb;
1523 int rval;
1525 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1526 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1527 cmd->result = DID_NO_CONNECT << 16;
1528 else
1529 cmd->result = DID_REQUEUE << 16;
1530 goto qc_fail_command;
1533 if (!sess) {
1534 cmd->result = DID_IMM_RETRY << 16;
1535 goto qc_fail_command;
1538 rval = iscsi_session_chkready(sess);
1539 if (rval) {
1540 cmd->result = rval;
1541 goto qc_fail_command;
1544 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1545 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1546 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1547 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1548 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1549 !test_bit(AF_ONLINE, &ha->flags) ||
1550 !test_bit(AF_LINK_UP, &ha->flags) ||
1551 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1552 goto qc_host_busy;
1554 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
1555 if (!srb)
1556 goto qc_host_busy;
1558 rval = qla4xxx_send_command_to_isp(ha, srb);
1559 if (rval != QLA_SUCCESS)
1560 goto qc_host_busy_free_sp;
1562 return 0;
1564 qc_host_busy_free_sp:
1565 qla4xxx_srb_free_dma(ha, srb);
1566 mempool_free(srb, ha->srb_mempool);
1568 qc_host_busy:
1569 return SCSI_MLQUEUE_HOST_BUSY;
1571 qc_fail_command:
1572 cmd->scsi_done(cmd);
1574 return 0;
1577 /**
1578 * qla4xxx_mem_free - frees memory allocated to adapter
1579 * @ha: Pointer to host adapter structure.
1580 *
1581 * Frees memory previously allocated by qla4xxx_mem_alloc
1582 **/
1583 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1585 if (ha->queues)
1586 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1587 ha->queues_dma);
1589 ha->queues_len = 0;
1590 ha->queues = NULL;
1591 ha->queues_dma = 0;
1592 ha->request_ring = NULL;
1593 ha->request_dma = 0;
1594 ha->response_ring = NULL;
1595 ha->response_dma = 0;
1596 ha->shadow_regs = NULL;
1597 ha->shadow_regs_dma = 0;
1599 /* Free srb pool. */
1600 if (ha->srb_mempool)
1601 mempool_destroy(ha->srb_mempool);
1603 ha->srb_mempool = NULL;
1605 if (ha->chap_dma_pool)
1606 dma_pool_destroy(ha->chap_dma_pool);
1608 /* release io space registers */
1609 if (is_qla8022(ha)) {
1610 if (ha->nx_pcibase)
1611 iounmap(
1612 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
1613 } else if (ha->reg)
1614 iounmap(ha->reg);
1615 pci_release_regions(ha->pdev);
1618 /**
1619 * qla4xxx_mem_alloc - allocates memory for use by adapter.
1620 * @ha: Pointer to host adapter structure
1621 *
1622 * Allocates DMA memory for request and response queues. Also allocates memory
1623 * for srbs.
1624 **/
1625 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1627 unsigned long align;
1629 /* Allocate contiguous block of DMA memory for queues. */
1630 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1631 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1632 sizeof(struct shadow_regs) +
1633 MEM_ALIGN_VALUE +
1634 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1635 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1636 &ha->queues_dma, GFP_KERNEL);
1637 if (ha->queues == NULL) {
1638 ql4_printk(KERN_WARNING, ha,
1639 "Memory Allocation failed - queues.\n");
1641 goto mem_alloc_error_exit;
1643 memset(ha->queues, 0, ha->queues_len);
1645 /*
1646 * As per RISC alignment requirements -- the bus-address must be a
1647 * multiple of the request-ring size (in bytes).
1648 */
1649 align = 0;
1650 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1651 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1652 (MEM_ALIGN_VALUE - 1));
1654 /* Update request and response queue pointers. */
1655 ha->request_dma = ha->queues_dma + align;
1656 ha->request_ring = (struct queue_entry *) (ha->queues + align);
1657 ha->response_dma = ha->queues_dma + align +
1658 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1659 ha->response_ring = (struct queue_entry *) (ha->queues + align +
1660 (REQUEST_QUEUE_DEPTH *
1661 QUEUE_SIZE));
1662 ha->shadow_regs_dma = ha->queues_dma + align +
1663 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1664 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1665 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1666 (REQUEST_QUEUE_DEPTH *
1667 QUEUE_SIZE) +
1668 (RESPONSE_QUEUE_DEPTH *
1669 QUEUE_SIZE));
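/*
 * Resulting layout of the single coherent allocation:
 * [alignment pad][request ring][response ring][shadow registers]
 */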
1671 /* Allocate memory for srb pool. */
1672 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1673 mempool_free_slab, srb_cachep);
1674 if (ha->srb_mempool == NULL) {
1675 ql4_printk(KERN_WARNING, ha,
1676 "Memory Allocation failed - SRB Pool.\n");
1678 goto mem_alloc_error_exit;
1681 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1682 CHAP_DMA_BLOCK_SIZE, 8, 0);
1684 if (ha->chap_dma_pool == NULL) {
1685 ql4_printk(KERN_WARNING, ha,
1686 "%s: chap_dma_pool allocation failed..\n", __func__);
1687 goto mem_alloc_error_exit;
1690 return QLA_SUCCESS;
1692 mem_alloc_error_exit:
1693 qla4xxx_mem_free(ha);
1694 return QLA_ERROR;
1697 /**
1698 * qla4_8xxx_check_fw_alive - Check firmware health
1699 * @ha: Pointer to host adapter structure.
1700 *
1701 * Context: Interrupt
1702 **/
1703 static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1705 uint32_t fw_heartbeat_counter, halt_status;
1707 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1708 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1709 if (fw_heartbeat_counter == 0xffffffff) {
1710 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1711 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1712 ha->host_no, __func__));
1713 return;
1716 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1717 ha->seconds_since_last_heartbeat++;
1718 /* FW not alive after 2 seconds */
1719 if (ha->seconds_since_last_heartbeat == 2) {
1720 ha->seconds_since_last_heartbeat = 0;
1721 halt_status = qla4_8xxx_rd_32(ha,
1722 QLA82XX_PEG_HALT_STATUS1);
1724 ql4_printk(KERN_INFO, ha,
1725 "scsi(%ld): %s, Dumping hw/fw registers:\n "
1726 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1727 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1728 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1729 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1730 ha->host_no, __func__, halt_status,
1731 qla4_8xxx_rd_32(ha,
1732 QLA82XX_PEG_HALT_STATUS2),
1733 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1734 0x3c),
1735 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1736 0x3c),
1737 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1738 0x3c),
1739 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1740 0x3c),
1741 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1742 0x3c));
1744 /* Since we cannot change dev_state in interrupt
1745 * context, set appropriate DPC flag then wakeup
1746 * DPC */
1747 if (halt_status & HALT_STATUS_UNRECOVERABLE)
1748 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1749 else {
1750 printk("scsi%ld: %s: detect abort needed!\n",
1751 ha->host_no, __func__);
1752 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1754 qla4xxx_wake_dpc(ha);
1755 qla4xxx_mailbox_premature_completion(ha);
1757 } else
1758 ha->seconds_since_last_heartbeat = 0;
1760 ha->fw_heartbeat_counter = fw_heartbeat_counter;
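/*
 * In short: if the firmware heartbeat counter has not advanced across two
 * consecutive one-second polls, the firmware is considered hung and either
 * DPC_HA_UNRECOVERABLE or DPC_RESET_HA is scheduled, depending on the
 * reported halt status.
 */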
1763 /**
1764 * qla4_8xxx_watchdog - Poll dev state
1765 * @ha: Pointer to host adapter structure.
1766 *
1767 * Context: Interrupt
1768 **/
1769 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1771 uint32_t dev_state;
1773 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1775 /* don't poll if reset is going on */
1776 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1777 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1778 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
1779 if (dev_state == QLA82XX_DEV_NEED_RESET &&
1780 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1781 if (!ql4xdontresethba) {
1782 ql4_printk(KERN_INFO, ha, "%s: HW State: "
1783 "NEED RESET!\n", __func__);
1784 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1785 qla4xxx_wake_dpc(ha);
1786 qla4xxx_mailbox_premature_completion(ha);
1788 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1789 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
1790 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1791 __func__);
1792 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1793 qla4xxx_wake_dpc(ha);
1794 } else {
1795 /* Check firmware health */
1796 qla4_8xxx_check_fw_alive(ha);
1801 /**
1802 * qla4xxx_timer - checks every second for work to do.
1803 * @ha: Pointer to host adapter structure.
1804 **/
1805 static void qla4xxx_timer(struct scsi_qla_host *ha)
1807 int start_dpc = 0;
1808 uint16_t w;
1810 /* If we are in the middle of AER/EEH processing
1811 * skip any processing and reschedule the timer
1812 */
1813 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1814 mod_timer(&ha->timer, jiffies + HZ);
1815 return;
1818 /* Hardware read to trigger an EEH error during mailbox waits. */
1819 if (!pci_channel_offline(ha->pdev))
1820 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
1822 if (is_qla8022(ha)) {
1823 qla4_8xxx_watchdog(ha);
1826 if (!is_qla8022(ha)) {
1827 /* Check for heartbeat interval. */
1828 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1829 ha->heartbeat_interval != 0) {
1830 ha->seconds_since_last_heartbeat++;
1831 if (ha->seconds_since_last_heartbeat >
1832 ha->heartbeat_interval + 2)
1833 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1837 /* Wakeup the dpc routine for this adapter, if needed. */
1838 if (start_dpc ||
1839 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1840 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1841 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
1842 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
1843 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1844 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
1845 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
1846 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1847 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1848 test_bit(DPC_AEN, &ha->dpc_flags)) {
1849 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1850 " - dpc flags = 0x%lx\n",
1851 ha->host_no, __func__, ha->dpc_flags));
1852 qla4xxx_wake_dpc(ha);
1855 /* Reschedule timer thread to call us back in one second */
1856 mod_timer(&ha->timer, jiffies + HZ);
1858 DEBUG2(ha->seconds_since_last_intr++);
1861 /**
1862 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1863 * @ha: Pointer to host adapter structure.
1864 *
1865 * This routine stalls the driver until all outstanding commands are returned.
1866 * Caller must release the Hardware Lock prior to calling this routine.
1867 **/
1868 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1870 uint32_t index = 0;
1871 unsigned long flags;
1872 struct scsi_cmnd *cmd;
1874 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1876 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1877 "complete\n", WAIT_CMD_TOV));
1879 while (!time_after_eq(jiffies, wtime)) {
1880 spin_lock_irqsave(&ha->hardware_lock, flags);
1881 /* Find a command that hasn't completed. */
1882 for (index = 0; index < ha->host->can_queue; index++) {
1883 cmd = scsi_host_find_tag(ha->host, index);
1885 * We cannot just check if the index is valid,
1886 * because if we are run from the scsi eh, then
1887 * the scsi/block layer is going to prevent
1888 * the tag from being released.
1890 if (cmd != NULL && CMD_SP(cmd))
1891 break;
1893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1895 /* If No Commands are pending, wait is complete */
1896 if (index == ha->host->can_queue)
1897 return QLA_SUCCESS;
1899 msleep(1000);
1901 /* If we timed out waiting for commands to complete,
1902 * return ERROR. */
1903 return QLA_ERROR;
1906 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
1908 uint32_t ctrl_status;
1909 unsigned long flags = 0;
1911 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
1913 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1914 return QLA_ERROR;
1916 spin_lock_irqsave(&ha->hardware_lock, flags);
1919 * If the SCSI Reset Interrupt bit is set, clear it.
1920 * Otherwise, the Soft Reset won't work.
1922 ctrl_status = readw(&ha->reg->ctrl_status);
1923 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1924 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1926 /* Issue Soft Reset */
1927 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
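/* The read-back below is presumably there to flush the posted MMIO
 * write, so the soft-reset request reaches the chip before the
 * hardware lock is released (a common PCI write-flush idiom). */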
1928 readl(&ha->reg->ctrl_status);
1930 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1931 return QLA_SUCCESS;
1935 * qla4xxx_soft_reset - performs soft reset.
1936 * @ha: Pointer to host adapter structure.
1937 **/
1938 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1939 {
1940 uint32_t max_wait_time;
1941 unsigned long flags = 0;
1942 int status;
1943 uint32_t ctrl_status;
1945 status = qla4xxx_hw_reset(ha);
1946 if (status != QLA_SUCCESS)
1947 return status;
1949 status = QLA_ERROR;
1950 /* Wait until the Network Reset Intr bit is cleared */
1951 max_wait_time = RESET_INTR_TOV;
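/* Poll roughly once per second, for up to RESET_INTR_TOV seconds,
 * waiting for the Network Reset Interrupt bit to be cleared
 * (normally done by the network function). */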
1952 do {
1953 spin_lock_irqsave(&ha->hardware_lock, flags);
1954 ctrl_status = readw(&ha->reg->ctrl_status);
1955 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1957 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1958 break;
1960 msleep(1000);
1961 } while ((--max_wait_time));
1963 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1964 DEBUG2(printk(KERN_WARNING
1965 "scsi%ld: Network Reset Intr not cleared by "
1966 "Network function, clearing it now!\n",
1967 ha->host_no));
1968 spin_lock_irqsave(&ha->hardware_lock, flags);
1969 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1970 readl(&ha->reg->ctrl_status);
1971 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1974 /* Wait until the firmware tells us the Soft Reset is done */
1975 max_wait_time = SOFT_RESET_TOV;
1976 do {
1977 spin_lock_irqsave(&ha->hardware_lock, flags);
1978 ctrl_status = readw(&ha->reg->ctrl_status);
1979 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1981 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1982 status = QLA_SUCCESS;
1983 break;
1986 msleep(1000);
1987 } while ((--max_wait_time));
1990 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1991 * after the soft reset has taken place.
1993 spin_lock_irqsave(&ha->hardware_lock, flags);
1994 ctrl_status = readw(&ha->reg->ctrl_status);
1995 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
1996 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1997 readl(&ha->reg->ctrl_status);
1999 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2001 /* If the soft reset fails, then most probably the BIOS on the other
2002 * function is also enabled.
2003 * Since initialization is sequential, the other function
2004 * won't be able to acknowledge the soft reset.
2005 * Issue a force soft reset to work around this scenario.
2006 */
2007 if (max_wait_time == 0) {
2008 /* Issue Force Soft Reset */
2009 spin_lock_irqsave(&ha->hardware_lock, flags);
2010 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2011 readl(&ha->reg->ctrl_status);
2012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2013 /* Wait until the firmware tells us the Soft Reset is done */
2014 max_wait_time = SOFT_RESET_TOV;
2015 do {
2016 spin_lock_irqsave(&ha->hardware_lock, flags);
2017 ctrl_status = readw(&ha->reg->ctrl_status);
2018 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2021 status = QLA_SUCCESS;
2022 break;
2025 msleep(1000);
2026 } while ((--max_wait_time));
2029 return status;
2033 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2034 * @ha: Pointer to host adapter structure.
2035 * @res: returned scsi status
2036 *
2037 * This routine is called just prior to a HARD RESET to return all
2038 * outstanding commands back to the Operating System.
2039 * Caller should make sure that the following locks are released
2040 * before calling this routine: Hardware lock, and io_request_lock.
2041 **/
2042 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2043 {
2044 struct srb *srb;
2045 int i;
2046 unsigned long flags;
2048 spin_lock_irqsave(&ha->hardware_lock, flags);
2049 for (i = 0; i < ha->host->can_queue; i++) {
2050 srb = qla4xxx_del_from_active_array(ha, i);
2051 if (srb != NULL) {
2052 srb->cmd->result = res;
2053 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2056 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2059 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2061 clear_bit(AF_ONLINE, &ha->flags);
2063 /* Disable the board */
2064 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2066 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2067 qla4xxx_mark_all_devices_missing(ha);
2068 clear_bit(AF_INIT_DONE, &ha->flags);
2071 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2073 struct iscsi_session *sess;
2074 struct ddb_entry *ddb_entry;
2076 sess = cls_session->dd_data;
2077 ddb_entry = sess->dd_data;
2078 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2079 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2083 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2084 * @ha: Pointer to host adapter structure.
2085 **/
2086 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2087 {
2088 int status = QLA_ERROR;
2089 uint8_t reset_chip = 0;
2091 /* Stall incoming I/O until we are done */
2092 scsi_block_requests(ha->host);
2093 clear_bit(AF_ONLINE, &ha->flags);
2094 clear_bit(AF_LINK_UP, &ha->flags);
2096 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2098 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2100 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2102 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2103 reset_chip = 1;
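/* DPC_RESET_HA requests a full chip reset; without it, an ISP-82xx
 * can get away with just resetting the firmware context below. */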
2105 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2106 * do not reset adapter, jump to initialize_adapter */
2107 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2108 status = QLA_SUCCESS;
2109 goto recover_ha_init_adapter;
2112 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2113 * from eh_host_reset or ioctl module */
2114 if (is_qla8022(ha) && !reset_chip &&
2115 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2117 DEBUG2(ql4_printk(KERN_INFO, ha,
2118 "scsi%ld: %s - Performing stop_firmware...\n",
2119 ha->host_no, __func__));
2120 status = ha->isp_ops->reset_firmware(ha);
2121 if (status == QLA_SUCCESS) {
2122 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2123 qla4xxx_cmd_wait(ha);
2124 ha->isp_ops->disable_intrs(ha);
2125 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2126 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2127 } else {
2128 /* If the stop_firmware fails then
2129 * reset the entire chip */
2130 reset_chip = 1;
2131 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2132 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2136 /* Issue full chip reset if recovering from a catastrophic error,
2137 * or if stop_firmware fails for ISP-82xx.
2138 * This is the default case for ISP-4xxx */
2139 if (!is_qla8022(ha) || reset_chip) {
2140 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2141 qla4xxx_cmd_wait(ha);
2142 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2143 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2144 DEBUG2(ql4_printk(KERN_INFO, ha,
2145 "scsi%ld: %s - Performing chip reset..\n",
2146 ha->host_no, __func__));
2147 status = ha->isp_ops->reset_chip(ha);
2150 /* Flush any pending ddb changed AENs */
2151 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2153 recover_ha_init_adapter:
2154 /* Upon successful firmware/chip reset, re-initialize the adapter */
2155 if (status == QLA_SUCCESS) {
2156 /* For ISP-4xxx, force function 1 to always initialize
2157 * before function 3 to prevent both functions from
2158 * stepping on top of each other */
2159 if (!is_qla8022(ha) && (ha->mac_index == 3))
2160 ssleep(6);
2162 /* NOTE: AF_ONLINE flag set upon successful completion of
2163 * qla4xxx_initialize_adapter */
2164 status = qla4xxx_initialize_adapter(ha);
2167 /* Retry failed adapter initialization, if necessary
2168 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2169 * case to prevent ping-pong resets between functions */
2170 if (!test_bit(AF_ONLINE, &ha->flags) &&
2171 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2172 /* Adapter initialization failed, see if we can retry
2173 * resetting the ha.
2174 * Since we don't want to block the DPC for too long
2175 * with multiple resets in the same thread,
2176 * utilize DPC to retry */
2177 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2178 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2179 DEBUG2(printk("scsi%ld: recover adapter - retrying "
2180 "(%d) more times\n", ha->host_no,
2181 ha->retry_reset_ha_cnt));
2182 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2183 status = QLA_ERROR;
2184 } else {
2185 if (ha->retry_reset_ha_cnt > 0) {
2186 /* Schedule another Reset HA--DPC will retry */
2187 ha->retry_reset_ha_cnt--;
2188 DEBUG2(printk("scsi%ld: recover adapter - "
2189 "retry remaining %d\n",
2190 ha->host_no,
2191 ha->retry_reset_ha_cnt));
2192 status = QLA_ERROR;
2195 if (ha->retry_reset_ha_cnt == 0) {
2196 /* Recover adapter retries have been exhausted.
2197 * Adapter DEAD */
2198 DEBUG2(printk("scsi%ld: recover adapter "
2199 "failed - board disabled\n",
2200 ha->host_no));
2201 qla4xxx_dead_adapter_cleanup(ha);
2202 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2203 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2204 clear_bit(DPC_RESET_HA_FW_CONTEXT,
2205 &ha->dpc_flags);
2206 status = QLA_ERROR;
2209 } else {
2210 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2211 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2212 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2215 ha->adapter_error_count++;
2217 if (test_bit(AF_ONLINE, &ha->flags))
2218 ha->isp_ops->enable_intrs(ha);
2220 scsi_unblock_requests(ha->host);
2222 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2223 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
2224 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
2226 return status;
2229 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2231 struct iscsi_session *sess;
2232 struct ddb_entry *ddb_entry;
2233 struct scsi_qla_host *ha;
2235 sess = cls_session->dd_data;
2236 ddb_entry = sess->dd_data;
2237 ha = ddb_entry->ha;
2238 if (!iscsi_is_session_online(cls_session)) {
2239 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2240 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2241 " unblock session\n", ha->host_no, __func__,
2242 ddb_entry->fw_ddb_index);
2243 iscsi_unblock_session(ddb_entry->sess);
2244 } else {
2245 /* Trigger relogin */
2246 iscsi_session_failure(cls_session->dd_data,
2247 ISCSI_ERR_CONN_FAILED);
2252 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2254 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2257 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2259 if (ha->dpc_thread)
2260 queue_work(ha->dpc_thread, &ha->dpc_work);
2264 * qla4xxx_do_dpc - dpc routine
2265 * @work: pointer to the dpc work_struct, embedded in the adapter structure
2266 *
2267 * This routine is a task that is scheduled by the interrupt handler
2268 * to perform the background processing for interrupts. We put it
2269 * on a task queue that is consumed whenever the scheduler runs; that
2270 * way we can do anything that may sleep (e.g. put the process to sleep).
2271 * In fact, the mid-level tries to sleep when it reaches the driver
2272 * threshold "host->can_queue". This can cause a panic if we were in our interrupt code.
2273 **/
2274 static void qla4xxx_do_dpc(struct work_struct *work)
2275 {
2276 struct scsi_qla_host *ha =
2277 container_of(work, struct scsi_qla_host, dpc_work);
2278 int status = QLA_ERROR;
2280 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
2281 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2282 ha->host_no, __func__, ha->flags, ha->dpc_flags))
2284 /* Initialization not yet finished. Don't do anything yet. */
2285 if (!test_bit(AF_INIT_DONE, &ha->flags))
2286 return;
2288 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2289 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2290 ha->host_no, __func__, ha->flags));
2291 return;
2294 if (is_qla8022(ha)) {
2295 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2296 qla4_8xxx_idc_lock(ha);
2297 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2298 QLA82XX_DEV_FAILED);
2299 qla4_8xxx_idc_unlock(ha);
2300 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2301 qla4_8xxx_device_state_handler(ha);
2303 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2304 qla4_8xxx_need_qsnt_handler(ha);
2308 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2309 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2310 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2311 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2312 if (ql4xdontresethba) {
2313 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2314 ha->host_no, __func__));
2315 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2316 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2317 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2318 goto dpc_post_reset_ha;
2320 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2321 test_bit(DPC_RESET_HA, &ha->dpc_flags))
2322 qla4xxx_recover_adapter(ha);
2324 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2325 uint8_t wait_time = RESET_INTR_TOV;
2327 while ((readw(&ha->reg->ctrl_status) &
2328 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2329 if (--wait_time == 0)
2330 break;
2331 msleep(1000);
2333 if (wait_time == 0)
2334 DEBUG2(printk("scsi%ld: %s: SR|FSR "
2335 "bit not cleared-- resetting\n",
2336 ha->host_no, __func__));
2337 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2338 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2339 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2340 status = qla4xxx_recover_adapter(ha);
2342 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2343 if (status == QLA_SUCCESS)
2344 ha->isp_ops->enable_intrs(ha);
2348 dpc_post_reset_ha:
2349 /* ---- process AEN? --- */
2350 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2351 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2353 /* ---- Get DHCP IP Address? --- */
2354 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2355 qla4xxx_get_dhcp_ip_address(ha);
2357 /* ---- link change? --- */
2358 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2359 if (!test_bit(AF_LINK_UP, &ha->flags)) {
2360 /* ---- link down? --- */
2361 qla4xxx_mark_all_devices_missing(ha);
2362 } else {
2363 /* ---- link up? --- *
2364 * F/W will auto login to all devices ONLY ONCE after
2365 * link up during driver initialization and runtime
2366 * fatal error recovery. Therefore, the driver must
2367 * manually relogin to devices when recovering from
2368 * connection failures, logouts, expired KATO, etc. */
2370 qla4xxx_relogin_all_devices(ha);
2376 * qla4xxx_free_adapter - release the adapter
2377 * @ha: pointer to adapter structure
2378 **/
2379 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2380 {
2382 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2383 /* Turn-off interrupts on the card. */
2384 ha->isp_ops->disable_intrs(ha);
2387 /* Remove timer thread, if present */
2388 if (ha->timer_active)
2389 qla4xxx_stop_timer(ha);
2391 /* Kill the kernel thread for this host */
2392 if (ha->dpc_thread)
2393 destroy_workqueue(ha->dpc_thread);
2395 /* Destroy the task workqueue for this host */
2396 if (ha->task_wq)
2397 destroy_workqueue(ha->task_wq);
2399 /* Put firmware in known state */
2400 ha->isp_ops->reset_firmware(ha);
2402 if (is_qla8022(ha)) {
2403 qla4_8xxx_idc_lock(ha);
2404 qla4_8xxx_clear_drv_active(ha);
2405 qla4_8xxx_idc_unlock(ha);
2408 /* Detach interrupts */
2409 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
2410 qla4xxx_free_irqs(ha);
2412 /* free extra memory */
2413 qla4xxx_mem_free(ha);
2416 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2418 int status = 0;
2419 uint8_t revision_id;
2420 unsigned long mem_base, mem_len, db_base, db_len;
2421 struct pci_dev *pdev = ha->pdev;
2423 status = pci_request_regions(pdev, DRIVER_NAME);
2424 if (status) {
2425 printk(KERN_WARNING
2426 "scsi(%ld) Failed to reserve PIO regions (%s) "
2427 "status=%d\n", ha->host_no, pci_name(pdev), status);
2428 goto iospace_error_exit;
2431 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2432 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2433 __func__, revision_id));
2434 ha->revision_id = revision_id;
2436 /* remap phys address */
2437 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2438 mem_len = pci_resource_len(pdev, 0);
2439 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2440 __func__, mem_base, mem_len));
2442 /* mapping of pcibase pointer */
2443 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2444 if (!ha->nx_pcibase) {
2445 printk(KERN_ERR
2446 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2447 pci_release_regions(ha->pdev);
2448 goto iospace_error_exit;
2451 /* Mapping of IO base pointer, door bell read and write pointer */
2453 /* mapping of IO base pointer */
2454 ha->qla4_8xxx_reg =
2455 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
2456 0xbc000 + (ha->pdev->devfn << 11));
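/* Each PCI function's register block lives in its own 2 KB window
 * (devfn << 11 == devfn * 0x800), starting at offset 0xbc000 of the
 * BAR 0 mapping. */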
2458 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
2459 db_len = pci_resource_len(pdev, 4);
2461 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2462 QLA82XX_CAM_RAM_DB2);
2464 return 0;
2465 iospace_error_exit:
2466 return -ENOMEM;
2469 /***
2470 * qla4xxx_iospace_config - maps registers
2471 * @ha: pointer to adapter structure
2472 *
2473 * This routine maps the HBA's registers from the PCI address space
2474 * into the kernel virtual address space for memory-mapped I/O.
2475 **/
2476 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
2477 {
2478 unsigned long pio, pio_len, pio_flags;
2479 unsigned long mmio, mmio_len, mmio_flags;
2481 pio = pci_resource_start(ha->pdev, 0);
2482 pio_len = pci_resource_len(ha->pdev, 0);
2483 pio_flags = pci_resource_flags(ha->pdev, 0);
2484 if (pio_flags & IORESOURCE_IO) {
2485 if (pio_len < MIN_IOBASE_LEN) {
2486 ql4_printk(KERN_WARNING, ha,
2487 "Invalid PCI I/O region size\n");
2488 pio = 0;
2490 } else {
2491 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
2492 pio = 0;
2495 /* Use MMIO operations for all accesses. */
2496 mmio = pci_resource_start(ha->pdev, 1);
2497 mmio_len = pci_resource_len(ha->pdev, 1);
2498 mmio_flags = pci_resource_flags(ha->pdev, 1);
2500 if (!(mmio_flags & IORESOURCE_MEM)) {
2501 ql4_printk(KERN_ERR, ha,
2502 "region #0 not an MMIO resource, aborting\n");
2504 goto iospace_error_exit;
2507 if (mmio_len < MIN_IOBASE_LEN) {
2508 ql4_printk(KERN_ERR, ha,
2509 "Invalid PCI mem region size, aborting\n");
2510 goto iospace_error_exit;
2513 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
2514 ql4_printk(KERN_WARNING, ha,
2515 "Failed to reserve PIO/MMIO regions\n");
2517 goto iospace_error_exit;
2520 ha->pio_address = pio;
2521 ha->pio_length = pio_len;
2522 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2523 if (!ha->reg) {
2524 ql4_printk(KERN_ERR, ha,
2525 "cannot remap MMIO, aborting\n");
2527 goto iospace_error_exit;
2530 return 0;
2532 iospace_error_exit:
2533 return -ENOMEM;
2536 static struct isp_operations qla4xxx_isp_ops = {
2537 .iospace_config = qla4xxx_iospace_config,
2538 .pci_config = qla4xxx_pci_config,
2539 .disable_intrs = qla4xxx_disable_intrs,
2540 .enable_intrs = qla4xxx_enable_intrs,
2541 .start_firmware = qla4xxx_start_firmware,
2542 .intr_handler = qla4xxx_intr_handler,
2543 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
2544 .reset_chip = qla4xxx_soft_reset,
2545 .reset_firmware = qla4xxx_hw_reset,
2546 .queue_iocb = qla4xxx_queue_iocb,
2547 .complete_iocb = qla4xxx_complete_iocb,
2548 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
2549 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
2550 .get_sys_info = qla4xxx_get_sys_info,
2551 };
2553 static struct isp_operations qla4_8xxx_isp_ops = {
2554 .iospace_config = qla4_8xxx_iospace_config,
2555 .pci_config = qla4_8xxx_pci_config,
2556 .disable_intrs = qla4_8xxx_disable_intrs,
2557 .enable_intrs = qla4_8xxx_enable_intrs,
2558 .start_firmware = qla4_8xxx_load_risc,
2559 .intr_handler = qla4_8xxx_intr_handler,
2560 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2561 .reset_chip = qla4_8xxx_isp_reset,
2562 .reset_firmware = qla4_8xxx_stop_firmware,
2563 .queue_iocb = qla4_8xxx_queue_iocb,
2564 .complete_iocb = qla4_8xxx_complete_iocb,
2565 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
2566 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
2567 .get_sys_info = qla4_8xxx_get_sys_info,
2568 };
2570 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2571 {
2572 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2575 uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2577 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2580 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2582 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2585 uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2587 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2590 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2592 struct scsi_qla_host *ha = data;
2593 char *str = buf;
2594 int rc;
2596 switch (type) {
2597 case ISCSI_BOOT_ETH_FLAGS:
2598 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2599 break;
2600 case ISCSI_BOOT_ETH_INDEX:
2601 rc = sprintf(str, "0\n");
2602 break;
2603 case ISCSI_BOOT_ETH_MAC:
2604 rc = sysfs_format_mac(str, ha->my_mac,
2605 MAC_ADDR_LEN);
2606 break;
2607 default:
2608 rc = -ENOSYS;
2609 break;
2611 return rc;
2614 static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2616 int rc;
2618 switch (type) {
2619 case ISCSI_BOOT_ETH_FLAGS:
2620 case ISCSI_BOOT_ETH_MAC:
2621 case ISCSI_BOOT_ETH_INDEX:
2622 rc = S_IRUGO;
2623 break;
2624 default:
2625 rc = 0;
2626 break;
2628 return rc;
2631 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2633 struct scsi_qla_host *ha = data;
2634 char *str = buf;
2635 int rc;
2637 switch (type) {
2638 case ISCSI_BOOT_INI_INITIATOR_NAME:
2639 rc = sprintf(str, "%s\n", ha->name_string);
2640 break;
2641 default:
2642 rc = -ENOSYS;
2643 break;
2645 return rc;
2648 static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2650 int rc;
2652 switch (type) {
2653 case ISCSI_BOOT_INI_INITIATOR_NAME:
2654 rc = S_IRUGO;
2655 break;
2656 default:
2657 rc = 0;
2658 break;
2660 return rc;
2663 static ssize_t
2664 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2665 char *buf)
2667 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2668 char *str = buf;
2669 int rc;
2671 switch (type) {
2672 case ISCSI_BOOT_TGT_NAME:
2673 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2674 break;
2675 case ISCSI_BOOT_TGT_IP_ADDR:
2676 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2677 rc = sprintf(buf, "%pI4\n",
2678 &boot_conn->dest_ipaddr.ip_address);
2679 else
2680 rc = sprintf(str, "%pI6\n",
2681 &boot_conn->dest_ipaddr.ip_address);
2682 break;
2683 case ISCSI_BOOT_TGT_PORT:
2684 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2685 break;
2686 case ISCSI_BOOT_TGT_CHAP_NAME:
2687 rc = sprintf(str, "%.*s\n",
2688 boot_conn->chap.target_chap_name_length,
2689 (char *)&boot_conn->chap.target_chap_name);
2690 break;
2691 case ISCSI_BOOT_TGT_CHAP_SECRET:
2692 rc = sprintf(str, "%.*s\n",
2693 boot_conn->chap.target_secret_length,
2694 (char *)&boot_conn->chap.target_secret);
2695 break;
2696 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2697 rc = sprintf(str, "%.*s\n",
2698 boot_conn->chap.intr_chap_name_length,
2699 (char *)&boot_conn->chap.intr_chap_name);
2700 break;
2701 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2702 rc = sprintf(str, "%.*s\n",
2703 boot_conn->chap.intr_secret_length,
2704 (char *)&boot_conn->chap.intr_secret);
2705 break;
2706 case ISCSI_BOOT_TGT_FLAGS:
2707 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2708 break;
2709 case ISCSI_BOOT_TGT_NIC_ASSOC:
2710 rc = sprintf(str, "0\n");
2711 break;
2712 default:
2713 rc = -ENOSYS;
2714 break;
2716 return rc;
2719 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2721 struct scsi_qla_host *ha = data;
2722 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2724 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2727 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2729 struct scsi_qla_host *ha = data;
2730 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2732 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2735 static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2737 int rc;
2739 switch (type) {
2740 case ISCSI_BOOT_TGT_NAME:
2741 case ISCSI_BOOT_TGT_IP_ADDR:
2742 case ISCSI_BOOT_TGT_PORT:
2743 case ISCSI_BOOT_TGT_CHAP_NAME:
2744 case ISCSI_BOOT_TGT_CHAP_SECRET:
2745 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2746 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2747 case ISCSI_BOOT_TGT_NIC_ASSOC:
2748 case ISCSI_BOOT_TGT_FLAGS:
2749 rc = S_IRUGO;
2750 break;
2751 default:
2752 rc = 0;
2753 break;
2755 return rc;
2758 static void qla4xxx_boot_release(void *data)
2760 struct scsi_qla_host *ha = data;
2762 scsi_host_put(ha->host);
2765 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2767 dma_addr_t buf_dma;
2768 uint32_t addr, pri_addr, sec_addr;
2769 uint32_t offset;
2770 uint16_t func_num;
2771 uint8_t val;
2772 uint8_t *buf = NULL;
2773 size_t size = 13 * sizeof(uint8_t);
2774 int ret = QLA_SUCCESS;
2776 func_num = PCI_FUNC(ha->pdev->devfn);
2778 DEBUG2(ql4_printk(KERN_INFO, ha,
2779 "%s: Get FW boot info for 0x%x func %d\n", __func__,
2780 (is_qla4032(ha) ? PCI_DEVICE_ID_QLOGIC_ISP4032 :
2781 PCI_DEVICE_ID_QLOGIC_ISP8022), func_num));
2783 if (is_qla4032(ha)) {
2784 if (func_num == 1) {
2785 addr = NVRAM_PORT0_BOOT_MODE;
2786 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2787 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2788 } else if (func_num == 3) {
2789 addr = NVRAM_PORT1_BOOT_MODE;
2790 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2791 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2792 } else {
2793 ret = QLA_ERROR;
2794 goto exit_boot_info;
2797 /* Check Boot Mode */
2798 val = rd_nvram_byte(ha, addr);
2799 if (!(val & 0x07)) {
2800 DEBUG2(ql4_printk(KERN_ERR, ha,
2801 "%s: Failed Boot options : 0x%x\n",
2802 __func__, val));
2803 ret = QLA_ERROR;
2804 goto exit_boot_info;
2807 /* get primary valid target index */
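/* Bit 7 of the NVRAM byte flags the entry as valid; the low seven
 * bits hold the DDB index (fall back to index 0/1 when unset). */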
2808 val = rd_nvram_byte(ha, pri_addr);
2809 if (val & BIT_7)
2810 ddb_index[0] = (val & 0x7f);
2811 else
2812 ddb_index[0] = 0;
2814 /* get secondary valid target index */
2815 val = rd_nvram_byte(ha, sec_addr);
2816 if (val & BIT_7)
2817 ddb_index[1] = (val & 0x7f);
2818 else
2819 ddb_index[1] = 1;
2821 } else if (is_qla8022(ha)) {
2822 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2823 &buf_dma, GFP_KERNEL);
2824 if (!buf) {
2825 DEBUG2(ql4_printk(KERN_ERR, ha,
2826 "%s: Unable to allocate dma buffer\n",
2827 __func__));
2828 ret = QLA_ERROR;
2829 goto exit_boot_info;
2832 if (ha->port_num == 0)
2833 offset = BOOT_PARAM_OFFSET_PORT0;
2834 else if (ha->port_num == 1)
2835 offset = BOOT_PARAM_OFFSET_PORT1;
2836 else {
2837 ret = QLA_ERROR;
2838 goto exit_boot_info_free;
2840 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2841 offset;
2842 if (qla4xxx_get_flash(ha, buf_dma, addr,
2843 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
2844 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
2845 "failed\n", ha->host_no, __func__));
2846 ret = QLA_ERROR;
2847 goto exit_boot_info_free;
2849 /* Check Boot Mode */
2850 if (!(buf[1] & 0x07)) {
2851 DEBUG2(ql4_printk(KERN_INFO, ha,
2852 "Failed: Boot options : 0x%x\n",
2853 buf[1]));
2854 ret = QLA_ERROR;
2855 goto exit_boot_info_free;
2858 /* get primary valid target index */
2859 if (buf[2] & BIT_7)
2860 ddb_index[0] = buf[2] & 0x7f;
2861 else
2862 ddb_index[0] = 0;
2864 /* get secondary valid target index */
2865 if (buf[11] & BIT_7)
2866 ddb_index[1] = buf[11] & 0x7f;
2867 else
2868 ddb_index[1] = 1;
2870 } else {
2871 ret = QLA_ERROR;
2872 goto exit_boot_info;
2875 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2876 " target ID %d\n", __func__, ddb_index[0],
2877 ddb_index[1]));
2879 exit_boot_info_free:
2880 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2881 exit_boot_info:
2882 return ret;
2885 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2886 struct ql4_boot_session_info *boot_sess,
2887 uint16_t ddb_index)
2889 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2890 struct dev_db_entry *fw_ddb_entry;
2891 dma_addr_t fw_ddb_entry_dma;
2892 uint16_t idx;
2893 uint16_t options;
2894 int ret = QLA_SUCCESS;
2896 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2897 &fw_ddb_entry_dma, GFP_KERNEL);
2898 if (!fw_ddb_entry) {
2899 DEBUG2(ql4_printk(KERN_ERR, ha,
2900 "%s: Unable to allocate dma buffer.\n",
2901 __func__));
2902 ret = QLA_ERROR;
2903 return ret;
2906 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2907 fw_ddb_entry_dma, ddb_index)) {
2908 DEBUG2(ql4_printk(KERN_ERR, ha,
2909 "%s: Flash DDB read Failed\n", __func__));
2910 ret = QLA_ERROR;
2911 goto exit_boot_target;
2914 /* Update target name and IP from DDB */
2915 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2916 min(sizeof(boot_sess->target_name),
2917 sizeof(fw_ddb_entry->iscsi_name)));
2919 options = le16_to_cpu(fw_ddb_entry->options);
2920 if (options & DDB_OPT_IPV6_DEVICE) {
2921 memcpy(&boot_conn->dest_ipaddr.ip_address,
2922 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2923 } else {
2924 boot_conn->dest_ipaddr.ip_type = 0x1;
2925 memcpy(&boot_conn->dest_ipaddr.ip_address,
2926 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2929 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2931 /* update chap information */
2932 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2934 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2936 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2938 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2939 target_chap_name,
2940 (char *)&boot_conn->chap.target_secret,
2941 idx);
2942 if (ret) {
2943 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2944 ret = QLA_ERROR;
2945 goto exit_boot_target;
2948 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2949 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2952 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2954 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
2956 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2957 intr_chap_name,
2958 (char *)&boot_conn->chap.intr_secret,
2959 (idx + 1));
2960 if (ret) {
2961 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
2962 ret = QLA_ERROR;
2963 goto exit_boot_target;
2966 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2967 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2970 exit_boot_target:
2971 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2972 fw_ddb_entry, fw_ddb_entry_dma);
2973 return ret;
2976 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
2978 uint16_t ddb_index[2];
2979 int ret = QLA_SUCCESS;
2981 memset(ddb_index, 0, sizeof(ddb_index));
2982 ret = get_fw_boot_info(ha, ddb_index);
2983 if (ret != QLA_SUCCESS) {
2984 DEBUG2(ql4_printk(KERN_ERR, ha,
2985 "%s: Failed to set boot info.\n", __func__));
2986 return ret;
2989 ret = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2990 ddb_index[0]);
2991 if (ret != QLA_SUCCESS) {
2992 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
2993 "primary target\n", __func__));
2996 ret = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2997 ddb_index[1]);
2998 if (ret != QLA_SUCCESS) {
2999 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3000 "secondary target\n", __func__));
3002 return ret;
3005 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3007 struct iscsi_boot_kobj *boot_kobj;
3009 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3010 return 0;
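/* Each boot kobject created below pins the Scsi_Host with its own
 * scsi_host_get() reference, which qla4xxx_boot_release() drops when
 * the kobject goes away. */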
3012 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3013 if (!ha->boot_kset)
3014 goto kset_free;
3016 if (!scsi_host_get(ha->host))
3017 goto kset_free;
3018 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3019 qla4xxx_show_boot_tgt_pri_info,
3020 qla4xxx_tgt_get_attr_visibility,
3021 qla4xxx_boot_release);
3022 if (!boot_kobj)
3023 goto put_host;
3025 if (!scsi_host_get(ha->host))
3026 goto kset_free;
3027 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3028 qla4xxx_show_boot_tgt_sec_info,
3029 qla4xxx_tgt_get_attr_visibility,
3030 qla4xxx_boot_release);
3031 if (!boot_kobj)
3032 goto put_host;
3034 if (!scsi_host_get(ha->host))
3035 goto kset_free;
3036 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3037 qla4xxx_show_boot_ini_info,
3038 qla4xxx_ini_get_attr_visibility,
3039 qla4xxx_boot_release);
3040 if (!boot_kobj)
3041 goto put_host;
3043 if (!scsi_host_get(ha->host))
3044 goto kset_free;
3045 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3046 qla4xxx_show_boot_eth_info,
3047 qla4xxx_eth_get_attr_visibility,
3048 qla4xxx_boot_release);
3049 if (!boot_kobj)
3050 goto put_host;
3052 return 0;
3054 put_host:
3055 scsi_host_put(ha->host);
3056 kset_free:
3057 iscsi_boot_destroy_kset(ha->boot_kset);
3058 return -ENOMEM;
3062 * qla4xxx_probe_adapter - callback function to probe HBA
3063 * @pdev: pointer to pci_dev structure
3064 * @ent: pointer to pci_device_id entry
3065 *
3066 * This routine will probe for QLogic 4xxx iSCSI host adapters.
3067 * It returns zero if successful. It also initializes all data necessary for
3068 * the driver.
3069 **/
3070 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3071 const struct pci_device_id *ent)
3072 {
3073 int ret = -ENODEV, status;
3074 struct Scsi_Host *host;
3075 struct scsi_qla_host *ha;
3076 uint8_t init_retry_count = 0;
3077 char buf[34];
3078 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
3079 uint32_t dev_state;
3081 if (pci_enable_device(pdev))
3082 return -1;
3084 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
3085 if (host == NULL) {
3086 printk(KERN_WARNING
3087 "qla4xxx: Couldn't allocate host from scsi layer!\n");
3088 goto probe_disable_device;
3091 /* Clear our data area */
3092 ha = to_qla_host(host);
3093 memset(ha, 0, sizeof(*ha));
3095 /* Save the information from PCI BIOS. */
3096 ha->pdev = pdev;
3097 ha->host = host;
3098 ha->host_no = host->host_no;
3100 pci_enable_pcie_error_reporting(pdev);
3102 /* Setup Runtime configurable options */
3103 if (is_qla8022(ha)) {
3104 ha->isp_ops = &qla4_8xxx_isp_ops;
3105 rwlock_init(&ha->hw_lock);
3106 ha->qdr_sn_window = -1;
3107 ha->ddr_mn_window = -1;
3108 ha->curr_window = 255;
3109 ha->func_num = PCI_FUNC(ha->pdev->devfn);
3110 nx_legacy_intr = &legacy_intr[ha->func_num];
3111 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3112 ha->nx_legacy_intr.tgt_status_reg =
3113 nx_legacy_intr->tgt_status_reg;
3114 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3115 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3116 } else {
3117 ha->isp_ops = &qla4xxx_isp_ops;
3120 /* Set EEH reset type to fundamental if required by hba */
3121 if (is_qla8022(ha))
3122 pdev->needs_freset = 1;
3124 /* Configure PCI I/O space. */
3125 ret = ha->isp_ops->iospace_config(ha);
3126 if (ret)
3127 goto probe_failed_ioconfig;
3129 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
3130 pdev->device, pdev->irq, ha->reg);
3132 qla4xxx_config_dma_addressing(ha);
3134 /* Initialize lists and spinlocks. */
3135 INIT_LIST_HEAD(&ha->free_srb_q);
3137 mutex_init(&ha->mbox_sem);
3138 init_completion(&ha->mbx_intr_comp);
3139 init_completion(&ha->disable_acb_comp);
3141 spin_lock_init(&ha->hardware_lock);
3143 /* Allocate dma buffers */
3144 if (qla4xxx_mem_alloc(ha)) {
3145 ql4_printk(KERN_WARNING, ha,
3146 "[ERROR] Failed to allocate memory for adapter\n");
3148 ret = -ENOMEM;
3149 goto probe_failed;
3152 host->cmd_per_lun = 3;
3153 host->max_channel = 0;
3154 host->max_lun = MAX_LUNS - 1;
3155 host->max_id = MAX_TARGETS;
3156 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3157 host->can_queue = MAX_SRBS ;
3158 host->transportt = qla4xxx_scsi_transport;
3160 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3161 if (ret) {
3162 ql4_printk(KERN_WARNING, ha,
3163 "%s: scsi_init_shared_tag_map failed\n", __func__);
3164 goto probe_failed;
3167 pci_set_drvdata(pdev, ha);
3169 ret = scsi_add_host(host, &pdev->dev);
3170 if (ret)
3171 goto probe_failed;
3173 if (is_qla8022(ha))
3174 (void) qla4_8xxx_get_flash_info(ha);
3177 * Initialize the Host adapter request/response queues and
3178 * firmware
3179 * NOTE: interrupts enabled upon successful completion
3181 status = qla4xxx_initialize_adapter(ha);
3182 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3183 init_retry_count++ < MAX_INIT_RETRIES) {
3185 if (is_qla8022(ha)) {
3186 qla4_8xxx_idc_lock(ha);
3187 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3188 qla4_8xxx_idc_unlock(ha);
3189 if (dev_state == QLA82XX_DEV_FAILED) {
3190 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3191 "initialize adapter. H/W is in failed state\n",
3192 __func__);
3193 break;
3196 DEBUG2(printk("scsi: %s: retrying adapter initialization "
3197 "(%d)\n", __func__, init_retry_count));
3199 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3200 continue;
3202 status = qla4xxx_initialize_adapter(ha);
3205 if (!test_bit(AF_ONLINE, &ha->flags)) {
3206 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
3208 if (is_qla8022(ha) && ql4xdontresethba) {
3209 /* Put the device in failed state. */
3210 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3211 qla4_8xxx_idc_lock(ha);
3212 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3213 QLA82XX_DEV_FAILED);
3214 qla4_8xxx_idc_unlock(ha);
3216 ret = -ENODEV;
3217 goto remove_host;
3220 /* Startup the kernel thread for this host adapter. */
3221 DEBUG2(printk("scsi: %s: Starting kernel thread for "
3222 "qla4xxx_dpc\n", __func__));
3223 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3224 ha->dpc_thread = create_singlethread_workqueue(buf);
3225 if (!ha->dpc_thread) {
3226 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
3227 ret = -ENODEV;
3228 goto remove_host;
3230 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
3232 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3233 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3234 if (!ha->task_wq) {
3235 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3236 ret = -ENODEV;
3237 goto remove_host;
3240 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3241 * (which is called indirectly by qla4xxx_initialize_adapter),
3242 * so that irqs will be registered after crbinit but before
3243 * mbx_intr_enable.
3245 if (!is_qla8022(ha)) {
3246 ret = qla4xxx_request_irqs(ha);
3247 if (ret) {
3248 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3249 "interrupt %d already in use.\n", pdev->irq);
3250 goto remove_host;
3254 pci_save_state(ha->pdev);
3255 ha->isp_ops->enable_intrs(ha);
3257 /* Start timer thread. */
3258 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3260 set_bit(AF_INIT_DONE, &ha->flags);
3262 printk(KERN_INFO
3263 " QLogic iSCSI HBA Driver version: %s\n"
3264 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3265 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3266 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3267 ha->patch_number, ha->build_number);
3269 if (qla4xxx_setup_boot_info(ha))
3270 ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
3271 __func__);
3273 qla4xxx_create_ifaces(ha);
3274 return 0;
3276 remove_host:
3277 scsi_remove_host(ha->host);
3279 probe_failed:
3280 qla4xxx_free_adapter(ha);
3282 probe_failed_ioconfig:
3283 pci_disable_pcie_error_reporting(pdev);
3284 scsi_host_put(ha->host);
3286 probe_disable_device:
3287 pci_disable_device(pdev);
3289 return ret;
3293 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initializing
3294 * @ha: pointer to adapter structure
3295 *
3296 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3297 * so that the other port will not re-initialize while in the process of
3298 * removing the ha due to driver unload or hba hotplug.
3299 **/
3300 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3301 {
3302 struct scsi_qla_host *other_ha = NULL;
3303 struct pci_dev *other_pdev = NULL;
3304 int fn = ISP4XXX_PCI_FN_2;
3306 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
3307 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3308 fn = ISP4XXX_PCI_FN_1;
3310 other_pdev =
3311 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3312 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3313 fn));
3315 /* Get other_ha if other_pdev is valid and its state is enabled */
3316 if (other_pdev) {
3317 if (atomic_read(&other_pdev->enable_cnt)) {
3318 other_ha = pci_get_drvdata(other_pdev);
3319 if (other_ha) {
3320 set_bit(AF_HA_REMOVAL, &other_ha->flags);
3321 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3322 "Prevent %s reinit\n", __func__,
3323 dev_name(&other_ha->pdev->dev)));
3326 pci_dev_put(other_pdev);
3331 * qla4xxx_remove_adapter - callback function to remove adapter.
3332 * @pdev: PCI device pointer
3333 **/
3334 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3335 {
3336 struct scsi_qla_host *ha;
3338 ha = pci_get_drvdata(pdev);
3340 if (!is_qla8022(ha))
3341 qla4xxx_prevent_other_port_reinit(ha);
3343 /* destroy iface from sysfs */
3344 qla4xxx_destroy_ifaces(ha);
3346 if (ha->boot_kset)
3347 iscsi_boot_destroy_kset(ha->boot_kset);
3349 scsi_remove_host(ha->host);
3351 qla4xxx_free_adapter(ha);
3353 scsi_host_put(ha->host);
3355 pci_disable_pcie_error_reporting(pdev);
3356 pci_disable_device(pdev);
3357 pci_set_drvdata(pdev, NULL);
3361 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3362 * @ha: HA context
3363 *
3364 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
3365 * supported addressing method.
3366 **/
3367 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
3368 {
3369 int retval;
3371 /* Update our PCI device dma_mask for full 64 bit mask */
3372 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3373 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
3374 dev_dbg(&ha->pdev->dev,
3375 "Failed to set 64 bit PCI consistent mask; "
3376 "using 32 bit.\n");
3377 retval = pci_set_consistent_dma_mask(ha->pdev,
3378 DMA_BIT_MASK(32));
3380 } else
3381 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
3384 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3386 struct iscsi_cls_session *cls_sess;
3387 struct iscsi_session *sess;
3388 struct ddb_entry *ddb;
3389 int queue_depth = QL4_DEF_QDEPTH;
3391 cls_sess = starget_to_session(sdev->sdev_target);
3392 sess = cls_sess->dd_data;
3393 ddb = sess->dd_data;
3395 sdev->hostdata = ddb;
3396 sdev->tagged_supported = 1;
3398 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3399 queue_depth = ql4xmaxqdepth;
3401 scsi_activate_tcq(sdev, queue_depth);
3402 return 0;
3405 static int qla4xxx_slave_configure(struct scsi_device *sdev)
3407 sdev->tagged_supported = 1;
3408 return 0;
3411 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3413 scsi_deactivate_tcq(sdev, 1);
3417 * qla4xxx_del_from_active_array - returns an active srb
3418 * @ha: Pointer to host adapter structure.
3419 * @index: index into the active_array
3420 *
3421 * This routine removes and returns the srb at the specified index
3422 **/
3423 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3424 uint32_t index)
3425 {
3426 struct srb *srb = NULL;
3427 struct scsi_cmnd *cmd = NULL;
3429 cmd = scsi_host_find_tag(ha->host, index);
3430 if (!cmd)
3431 return srb;
3433 srb = (struct srb *)CMD_SP(cmd);
3434 if (!srb)
3435 return srb;
3437 /* update counters */
3438 if (srb->flags & SRB_DMA_VALID) {
3439 ha->req_q_count += srb->iocb_cnt;
3440 ha->iocb_cnt -= srb->iocb_cnt;
3441 if (srb->cmd)
3442 srb->cmd->host_scribble =
3443 (unsigned char *)(unsigned long) MAX_SRBS;
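/* MAX_SRBS is outside the valid tag range, so storing it in
 * host_scribble effectively marks the command as removed from the
 * active array. */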
3445 return srb;
3449 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
3450 * @ha: Pointer to host adapter structure.
3451 * @cmd: Scsi Command to wait on.
3452 *
3453 * This routine waits for the command to be returned by the Firmware
3454 * for some max time.
3455 **/
3456 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3457 struct scsi_cmnd *cmd)
3458 {
3459 int done = 0;
3460 struct srb *rp;
3461 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
3462 int ret = SUCCESS;
3464 /* Don't wait on the command if a PCI error is being handled
3465 * by the PCI AER driver
3466 */
3467 if (unlikely(pci_channel_offline(ha->pdev)) ||
3468 (test_bit(AF_EEH_BUSY, &ha->flags))) {
3469 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3470 ha->host_no, __func__);
3471 return ret;
3474 do {
3475 /* Check to see if the command has been returned to the OS */
3476 rp = (struct srb *) CMD_SP(cmd);
3477 if (rp == NULL) {
3478 done++;
3479 break;
3482 msleep(2000);
3483 } while (max_wait_time--);
3485 return done;
3489 * qla4xxx_wait_for_hba_online - waits for HBA to come online
3490 * @ha: Pointer to host adapter structure
3491 **/
3492 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3493 {
3494 unsigned long wait_online;
3496 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
3497 while (time_before(jiffies, wait_online)) {
3499 if (adapter_up(ha))
3500 return QLA_SUCCESS;
3502 msleep(2000);
3505 return QLA_ERROR;
3509 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
3510 * @ha: pointer to HBA
3511 * @stgt: pointer to the SCSI target to wait on
3512 * @sdev: pointer to the SCSI device to wait on (NULL means all devices on @stgt)
3513 *
3514 * This function waits for all outstanding commands to a lun to complete. It
3515 * returns 0 if all pending commands are returned and 1 otherwise.
3516 **/
3517 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3518 struct scsi_target *stgt,
3519 struct scsi_device *sdev)
3520 {
3521 int cnt;
3522 int status = 0;
3523 struct scsi_cmnd *cmd;
3526 * Waiting for all commands for the designated target or dev
3527 * in the active array
3529 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3530 cmd = scsi_host_find_tag(ha->host, cnt);
3531 if (cmd && stgt == scsi_target(cmd->device) &&
3532 (!sdev || sdev == cmd->device)) {
3533 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3534 status++;
3535 break;
3539 return status;
3543 * qla4xxx_eh_abort - callback for abort task.
3544 * @cmd: Pointer to Linux's SCSI command structure
3545 *
3546 * This routine is called by the Linux OS to abort the specified
3547 * command.
3548 **/
3549 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3550 {
3551 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3552 unsigned int id = cmd->device->id;
3553 unsigned int lun = cmd->device->lun;
3554 unsigned long flags;
3555 struct srb *srb = NULL;
3556 int ret = SUCCESS;
3557 int wait = 0;
3559 ql4_printk(KERN_INFO, ha,
3560 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3561 ha->host_no, id, lun, cmd);
3563 spin_lock_irqsave(&ha->hardware_lock, flags);
3564 srb = (struct srb *) CMD_SP(cmd);
3565 if (!srb) {
3566 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3567 return SUCCESS;
3569 kref_get(&srb->srb_ref);
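/* Hold an extra reference so the srb cannot be freed underneath us
 * while the abort mailbox command is outstanding; dropped below. */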
3570 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3572 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3573 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3574 ha->host_no, id, lun));
3575 ret = FAILED;
3576 } else {
3577 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3578 ha->host_no, id, lun));
3579 wait = 1;
3582 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3584 /* Wait for command to complete */
3585 if (wait) {
3586 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3587 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3588 ha->host_no, id, lun));
3589 ret = FAILED;
3593 ql4_printk(KERN_INFO, ha,
3594 "scsi%ld:%d:%d: Abort command - %s\n",
3595 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
3597 return ret;
3601 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
3602 * @cmd: Pointer to Linux's SCSI command structure
3603 *
3604 * This routine is called by the Linux OS to reset the LUN addressed by the
3605 * specified command.
3606 **/
3607 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3608 {
3609 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3610 struct ddb_entry *ddb_entry = cmd->device->hostdata;
3611 int ret = FAILED, stat;
3613 if (!ddb_entry)
3614 return ret;
3616 ret = iscsi_block_scsi_eh(cmd);
3617 if (ret)
3618 return ret;
3619 ret = FAILED;
3621 ql4_printk(KERN_INFO, ha,
3622 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3623 cmd->device->channel, cmd->device->id, cmd->device->lun);
3625 DEBUG2(printk(KERN_INFO
3626 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3627 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
3628 cmd, jiffies, cmd->request->timeout / HZ,
3629 ha->dpc_flags, cmd->result, cmd->allowed));
3631 /* FIXME: wait for hba to go online */
3632 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3633 if (stat != QLA_SUCCESS) {
3634 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
3635 goto eh_dev_reset_done;
3638 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3639 cmd->device)) {
3640 ql4_printk(KERN_INFO, ha,
3641 "DEVICE RESET FAILED - waiting for "
3642 "commands.\n");
3643 goto eh_dev_reset_done;
3646 /* Send marker. */
3647 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3648 MM_LUN_RESET) != QLA_SUCCESS)
3649 goto eh_dev_reset_done;
3651 ql4_printk(KERN_INFO, ha,
3652 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3653 ha->host_no, cmd->device->channel, cmd->device->id,
3654 cmd->device->lun);
3656 ret = SUCCESS;
3658 eh_dev_reset_done:
3660 return ret;
3664 * qla4xxx_eh_target_reset - callback for target reset.
3665 * @cmd: Pointer to Linux's SCSI command structure
3666 *
3667 * This routine is called by the Linux OS to reset the target.
3668 **/
3669 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3670 {
3671 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3672 struct ddb_entry *ddb_entry = cmd->device->hostdata;
3673 int stat, ret;
3675 if (!ddb_entry)
3676 return FAILED;
3678 ret = iscsi_block_scsi_eh(cmd);
3679 if (ret)
3680 return ret;
3682 starget_printk(KERN_INFO, scsi_target(cmd->device),
3683 "WARM TARGET RESET ISSUED.\n");
3685 DEBUG2(printk(KERN_INFO
3686 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3687 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
3688 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
3689 ha->dpc_flags, cmd->result, cmd->allowed));
3691 stat = qla4xxx_reset_target(ha, ddb_entry);
3692 if (stat != QLA_SUCCESS) {
3693 starget_printk(KERN_INFO, scsi_target(cmd->device),
3694 "WARM TARGET RESET FAILED.\n");
3695 return FAILED;
3698 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3699 NULL)) {
3700 starget_printk(KERN_INFO, scsi_target(cmd->device),
3701 "WARM TARGET DEVICE RESET FAILED - "
3702 "waiting for commands.\n");
3703 return FAILED;
3706 /* Send marker. */
3707 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3708 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3709 starget_printk(KERN_INFO, scsi_target(cmd->device),
3710 "WARM TARGET DEVICE RESET FAILED - "
3711 "marker iocb failed.\n");
3712 return FAILED;
3715 starget_printk(KERN_INFO, scsi_target(cmd->device),
3716 "WARM TARGET RESET SUCCEEDED.\n");
3717 return SUCCESS;
3721 * qla4xxx_eh_host_reset - kernel callback
3722 * @cmd: Pointer to Linux's SCSI command structure
3723 *
3724 * This routine is invoked by the Linux kernel to perform fatal error
3725 * recovery on the specified adapter.
3726 **/
3727 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3728 {
3729 int return_status = FAILED;
3730 struct scsi_qla_host *ha;
3732 ha = to_qla_host(cmd->device->host);
3734 if (ql4xdontresethba) {
3735 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3736 ha->host_no, __func__));
3737 return FAILED;
3740 ql4_printk(KERN_INFO, ha,
3741 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
3742 cmd->device->channel, cmd->device->id, cmd->device->lun);
3744 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3745 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
3746 "DEAD.\n", ha->host_no, cmd->device->channel,
3747 __func__));
3749 return FAILED;
3752 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3753 if (is_qla8022(ha))
3754 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3755 else
3756 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3759 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
3760 return_status = SUCCESS;
3762 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
3763 return_status == FAILED ? "FAILED" : "SUCCEEDED");
3765 return return_status;
3768 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3770 uint32_t mbox_cmd[MBOX_REG_COUNT];
3771 uint32_t mbox_sts[MBOX_REG_COUNT];
3772 struct addr_ctrl_blk_def *acb = NULL;
3773 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3774 int rval = QLA_SUCCESS;
3775 dma_addr_t acb_dma;
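/* A context reset saves the current address control block (ACB),
 * disables it, waits for the firmware to acknowledge, and then writes
 * the saved ACB back; this resets the firmware's IP/iSCSI context
 * without a full chip reset. */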
3777 acb = dma_alloc_coherent(&ha->pdev->dev,
3778 sizeof(struct addr_ctrl_blk_def),
3779 &acb_dma, GFP_KERNEL);
3780 if (!acb) {
3781 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3782 __func__);
3783 rval = -ENOMEM;
3784 goto exit_port_reset;
3787 memset(acb, 0, acb_len);
3789 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3790 if (rval != QLA_SUCCESS) {
3791 rval = -EIO;
3792 goto exit_free_acb;
3795 rval = qla4xxx_disable_acb(ha);
3796 if (rval != QLA_SUCCESS) {
3797 rval = -EIO;
3798 goto exit_free_acb;
3801 wait_for_completion_timeout(&ha->disable_acb_comp,
3802 DISABLE_ACB_TOV * HZ);
3804 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3805 if (rval != QLA_SUCCESS) {
3806 rval = -EIO;
3807 goto exit_free_acb;
3810 exit_free_acb:
3811 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3812 acb, acb_dma);
3813 exit_port_reset:
3814 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3815 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3816 return rval;
3819 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3821 struct scsi_qla_host *ha = to_qla_host(shost);
3822 int rval = QLA_SUCCESS;
3824 if (ql4xdontresethba) {
3825 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3826 __func__));
3827 rval = -EPERM;
3828 goto exit_host_reset;
3831 rval = qla4xxx_wait_for_hba_online(ha);
3832 if (rval != QLA_SUCCESS) {
3833 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3834 "adapter\n", __func__));
3835 rval = -EIO;
3836 goto exit_host_reset;
3839 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3840 goto recover_adapter;
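/* Translate the requested sysfs reset type into the corresponding
 * DPC flag (or, for a 4xxx firmware reset, do a context reset
 * directly). */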
3842 switch (reset_type) {
3843 case SCSI_ADAPTER_RESET:
3844 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3845 break;
3846 case SCSI_FIRMWARE_RESET:
3847 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3848 if (is_qla8022(ha))
3849 /* set firmware context reset */
3850 set_bit(DPC_RESET_HA_FW_CONTEXT,
3851 &ha->dpc_flags);
3852 else {
3853 rval = qla4xxx_context_reset(ha);
3854 goto exit_host_reset;
3857 break;
3860 recover_adapter:
3861 rval = qla4xxx_recover_adapter(ha);
3862 if (rval != QLA_SUCCESS) {
3863 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3864 __func__));
3865 rval = -EIO;
3868 exit_host_reset:
3869 return rval;
/* PCI AER driver recovers from all correctable errors without
 * driver intervention. For uncorrectable errors the PCI AER driver
 * calls the following device driver callbacks:
 *
 * - Fatal Errors - link_reset
 * - Non-Fatal Errors - driver's pci_error_detected() which
 *   returns CAN_RECOVER, NEED_RESET or DISCONNECT.
 *
 * The PCI AER driver then calls:
 * CAN_RECOVER - driver's pci_mmio_enabled(); mmio_enabled
 *		 returns RECOVERED, or NEED_RESET if the firmware is hung
 * NEED_RESET - driver's slot_reset()
 * DISCONNECT - device is dead and cannot recover
 * RECOVERED - driver's pci_resume()
 */

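/* The AER callbacks below are registered with the PCI core through
 * qla4xxx_err_handler, referenced from qla4xxx_pci_driver at the
 * bottom of this file.
 */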
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
		   ha->host_no, __func__, state);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	switch (state) {
	case pci_channel_io_normal:
		clear_bit(AF_EEH_BUSY, &ha->flags);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		set_bit(AF_EEH_BUSY, &ha->flags);
		qla4xxx_mailbox_premature_completion(ha);
		qla4xxx_free_irqs(ha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(AF_EEH_BUSY, &ha->flags);
		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * qla4xxx_pci_mmio_enabled() gets called if
 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
 * and read/write to the device still works.
 **/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	return PCI_ERS_RESULT_RECOVERED;
}

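/**
 * qla4_8xxx_error_recovery - recover an ISP82xx after an EEH/AER event
 * @ha: pointer to adapter structure
 *
 * Fails all iSCSI sessions, then walks the lower-numbered PCI functions
 * of the device to decide whether this function should act as the reset
 * owner. The owner re-initializes the firmware and drives the IDC
 * device state in the CRB registers; a non-owner re-initializes only
 * its own context once the device state is READY.
 **/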
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	uint32_t ret = 0;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
			   "func %x\n", ha->host_no, __func__, fn);
		/* Get the pci device given the domain, bus,
		 * slot/function number */
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
		    fn));

		if (!other_pdev)
			continue;

		if (atomic_read(&other_pdev->enable_cnt)) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
				   "func in enabled state%x\n", ha->host_no,
				   __func__, fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	/* The first function on the card, the reset owner will
	 * start & initialize the firmware. The other functions
	 * on the card will reset the firmware context
	 */
	if (!fn) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
			   "0x%x is the owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);

		qla4_8xxx_idc_lock(ha);
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA82XX_DEV_COLD);

		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
				QLA82XX_IDC_VERSION);

		qla4_8xxx_idc_unlock(ha);
		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha);
		qla4_8xxx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "FAILED\n", ha->host_no, __func__);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_FAILED);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "READY\n", ha->host_no, __func__);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ret = qla4xxx_request_irqs(ha);
			if (ret) {
				ql4_printk(KERN_WARNING, ha, "Failed to "
					   "reserve interrupt %d already in use.\n",
					   ha->pdev->irq);
				rval = QLA_ERROR;
			} else {
				ha->isp_ops->enable_intrs(ha);
				rval = QLA_SUCCESS;
			}
		}
		qla4_8xxx_idc_unlock(ha);
	} else {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
			   "the reset owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);
		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		     QLA82XX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha);
			if (rval == QLA_SUCCESS) {
				ret = qla4xxx_request_irqs(ha);
				if (ret) {
					ql4_printk(KERN_WARNING, ha, "Failed to"
						   " reserve interrupt %d already in"
						   " use.\n", ha->pdev->irq);
					rval = QLA_ERROR;
				} else {
					ha->isp_ops->enable_intrs(ha);
					rval = QLA_SUCCESS;
				}
			}
			qla4_8xxx_idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			qla4_8xxx_idc_unlock(ha);
		}
	}
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}

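/**
 * qla4xxx_pci_slot_reset - AER slot_reset callback
 * @pdev: PCI device that was reset
 *
 * Restores and re-saves the PCI config state, re-enables the device
 * and, on ISP82xx, runs qla4_8xxx_error_recovery(). Returns
 * PCI_ERS_RESULT_RECOVERED on success so the AER core proceeds to the
 * resume callback.
 **/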
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int rc;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
		   ha->host_no, __func__);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	/* Restore the saved state of PCIe device -
	 * BAR registers, PCI Config space, PCIX, MSI,
	 * IOV states
	 */
	pci_restore_state(pdev);

	/* pci_restore_state() clears the saved_state flag of the device,
	 * so save the restored state again to set it back.
	 */
	pci_save_state(pdev);

	/* Initialize device or resume if in suspended state */
	rc = pci_enable_device(pdev);
	if (rc) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
			   "device after reset\n", ha->host_no, __func__);
		goto exit_slot_reset;
	}

	ha->isp_ops->disable_intrs(ha);

	if (is_qla8022(ha)) {
		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

exit_slot_reset:
	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
		   ha->host_no, __func__, ret);
	return ret;
}

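/**
 * qla4xxx_pci_resume - AER resume callback
 * @pdev: PCI device that completed error recovery
 *
 * Waits for the HBA to come back online, clears the uncorrectable AER
 * error status and drops the EEH-busy flag so normal I/O can resume.
 **/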
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int ret;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
		   ha->host_no, __func__);

	ret = qla4xxx_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
			   "resume I/O from slot/link_reset\n", ha->host_no,
			   __func__);
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);
	clear_bit(AF_EEH_BUSY, &ha->flags);
}

static struct pci_error_handlers qla4xxx_err_handler = {
	.error_detected = qla4xxx_pci_error_detected,
	.mmio_enabled = qla4xxx_pci_mmio_enabled,
	.slot_reset = qla4xxx_pci_slot_reset,
	.resume = qla4xxx_pci_resume,
};

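/* PCI IDs claimed by this driver: ISP4010, ISP4022, ISP4032 and ISP8022. */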
static struct pci_device_id qla4xxx_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);

static struct pci_driver qla4xxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= qla4xxx_pci_tbl,
	.probe		= qla4xxx_probe_adapter,
	.remove		= qla4xxx_remove_adapter,
	.err_handler	= &qla4xxx_err_handler,
};

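/**
 * qla4xxx_module_init - Module initialization.
 **/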
static int __init qla4xxx_module_init(void)
{
	int ret;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		       "%s: Unable to allocate SRB cache..."
		       "Failing load!\n", DRIVER_NAME);
		ret = -ENOMEM;
		goto no_srp_cache;
	}

	/* Derive version string. */
	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
	if (ql4xextended_error_logging)
		strcat(qla4xxx_version_str, "-debug");

	qla4xxx_scsi_transport =
		iscsi_register_transport(&qla4xxx_iscsi_transport);
	if (!qla4xxx_scsi_transport) {
		ret = -ENODEV;
		goto release_srb_cache;
	}

	ret = pci_register_driver(&qla4xxx_pci_driver);
	if (ret)
		goto unregister_transport;

	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
	return 0;

unregister_transport:
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
	kmem_cache_destroy(srb_cachep);
no_srp_cache:
	return ret;
}

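/**
 * qla4xxx_module_exit - Module cleanup.
 **/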
static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);