drivers/scsi/qla4xxx/ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
24 /* Driver version */
26 static char qla4xxx_version_str[40];
29 /* SRB allocation cache */
31 static struct kmem_cache *srb_cachep;
34 /* Module parameter information and variables */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
86 int ql4xmdcapmask = 0;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0 (firmware default capture mask)\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
102 /* SCSI host template entry points */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
107 /* iSCSI template entry points */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
153 int len);
154 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
157 /* SCSI host template entry points */
159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164 static int qla4xxx_slave_alloc(struct scsi_device *device);
165 static umode_t qla4_attr_is_visible(int param_type, int param);
166 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
169 /* iSCSI Flash DDB sysfs entry points */
171 static int
172 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
173 struct iscsi_bus_flash_conn *fnode_conn,
174 void *data, int len);
175 static int
176 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
177 int param, char *buf);
178 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
179 int len);
180 static int
181 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
182 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
183 struct iscsi_bus_flash_conn *fnode_conn);
184 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
185 struct iscsi_bus_flash_conn *fnode_conn);
186 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
189 QLA82XX_LEGACY_INTR_CONFIG;
191 static struct scsi_host_template qla4xxx_driver_template = {
192 .module = THIS_MODULE,
193 .name = DRIVER_NAME,
194 .proc_name = DRIVER_NAME,
195 .queuecommand = qla4xxx_queuecommand,
197 .eh_abort_handler = qla4xxx_eh_abort,
198 .eh_device_reset_handler = qla4xxx_eh_device_reset,
199 .eh_target_reset_handler = qla4xxx_eh_target_reset,
200 .eh_host_reset_handler = qla4xxx_eh_host_reset,
201 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
203 .slave_alloc = qla4xxx_slave_alloc,
204 .change_queue_depth = scsi_change_queue_depth,
206 .this_id = -1,
207 .cmd_per_lun = 3,
208 .sg_tablesize = SG_ALL,
210 .max_sectors = 0xFFFF,
211 .shost_attrs = qla4xxx_host_attrs,
212 .host_reset = qla4xxx_host_reset,
213 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
216 static struct iscsi_transport qla4xxx_iscsi_transport = {
217 .owner = THIS_MODULE,
218 .name = DRIVER_NAME,
219 .caps = CAP_TEXT_NEGO |
220 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
221 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
222 CAP_MULTI_R2T,
223 .attr_is_visible = qla4_attr_is_visible,
224 .create_session = qla4xxx_session_create,
225 .destroy_session = qla4xxx_session_destroy,
226 .start_conn = qla4xxx_conn_start,
227 .create_conn = qla4xxx_conn_create,
228 .bind_conn = qla4xxx_conn_bind,
229 .stop_conn = iscsi_conn_stop,
230 .destroy_conn = qla4xxx_conn_destroy,
231 .set_param = iscsi_set_param,
232 .get_conn_param = qla4xxx_conn_get_param,
233 .get_session_param = qla4xxx_session_get_param,
234 .get_ep_param = qla4xxx_get_ep_param,
235 .ep_connect = qla4xxx_ep_connect,
236 .ep_poll = qla4xxx_ep_poll,
237 .ep_disconnect = qla4xxx_ep_disconnect,
238 .get_stats = qla4xxx_conn_get_stats,
239 .send_pdu = iscsi_conn_send_pdu,
240 .xmit_task = qla4xxx_task_xmit,
241 .cleanup_task = qla4xxx_task_cleanup,
242 .alloc_pdu = qla4xxx_alloc_pdu,
244 .get_host_param = qla4xxx_host_get_param,
245 .set_iface_param = qla4xxx_iface_set_param,
246 .get_iface_param = qla4xxx_get_iface_param,
247 .bsg_request = qla4xxx_bsg_request,
248 .send_ping = qla4xxx_send_ping,
249 .get_chap = qla4xxx_get_chap_list,
250 .delete_chap = qla4xxx_delete_chap,
251 .set_chap = qla4xxx_set_chap_entry,
252 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
253 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
254 .new_flashnode = qla4xxx_sysfs_ddb_add,
255 .del_flashnode = qla4xxx_sysfs_ddb_delete,
256 .login_flashnode = qla4xxx_sysfs_ddb_login,
257 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
258 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
259 .get_host_stats = qla4xxx_get_host_stats,
262 static struct scsi_transport_template *qla4xxx_scsi_transport;
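/**
 * qla4xxx_isp_check_reg - check that the ISP still responds on the PCI bus
 * @ha: pointer to adapter structure
 *
 * Read a chip-specific status register (host_status on 8022, the PEG alive
 * counter on 8032/8042, ctrl_status otherwise) and return QLA_ERROR if the
 * value read equals QL4_ISP_REG_DISCONNECT, i.e. the adapter is no longer
 * reachable over PCI.
 */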
264 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
266 u32 reg_val = 0;
267 int rval = QLA_SUCCESS;
269 if (is_qla8022(ha))
270 reg_val = readl(&ha->qla4_82xx_reg->host_status);
271 else if (is_qla8032(ha) || is_qla8042(ha))
272 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
273 else
274 reg_val = readw(&ha->reg->ctrl_status);
276 if (reg_val == QL4_ISP_REG_DISCONNECT)
277 rval = QLA_ERROR;
279 return rval;
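/**
 * qla4xxx_send_ping - issue a firmware ping IOCB to a destination address
 * @shost: scsi host
 * @iface_num: interface number (0 or 1)
 * @iface_type: ISCSI_IFACE_TYPE_IPV4 or ISCSI_IFACE_TYPE_IPV6
 * @payload_size: ping payload size
 * @pid: ping request identifier
 * @dst_addr: destination address
 *
 * Supports IPv4-to-IPv4 and IPv6-to-IPv6 pings.  For IPv6 the link-local
 * source address is tried first and, on failure, the interface's global
 * IPv6 address (addr0 or addr1 depending on @iface_num) is used.  Returns
 * 0 on success or a negative errno.
 */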
282 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
283 uint32_t iface_type, uint32_t payload_size,
284 uint32_t pid, struct sockaddr *dst_addr)
286 struct scsi_qla_host *ha = to_qla_host(shost);
287 struct sockaddr_in *addr;
288 struct sockaddr_in6 *addr6;
289 uint32_t options = 0;
290 uint8_t ipaddr[IPv6_ADDR_LEN];
291 int rval;
293 memset(ipaddr, 0, IPv6_ADDR_LEN);
294 /* IPv4 to IPv4 */
295 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
296 (dst_addr->sa_family == AF_INET)) {
297 addr = (struct sockaddr_in *)dst_addr;
298 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
299 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
300 "dest: %pI4\n", __func__,
301 &ha->ip_config.ip_address, ipaddr));
302 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
303 ipaddr);
304 if (rval)
305 rval = -EINVAL;
306 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
307 (dst_addr->sa_family == AF_INET6)) {
308 /* IPv6 to IPv6 */
309 addr6 = (struct sockaddr_in6 *)dst_addr;
310 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
312 options |= PING_IPV6_PROTOCOL_ENABLE;
314 /* Ping using LinkLocal address */
315 if ((iface_num == 0) || (iface_num == 1)) {
316 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
317 "src: %pI6 dest: %pI6\n", __func__,
318 &ha->ip_config.ipv6_link_local_addr,
319 ipaddr));
320 options |= PING_IPV6_LINKLOCAL_ADDR;
321 rval = qla4xxx_ping_iocb(ha, options, payload_size,
322 pid, ipaddr);
323 } else {
324 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
325 "not supported\n", __func__, iface_num);
326 rval = -ENOSYS;
327 goto exit_send_ping;
331 /* If ping using LinkLocal address fails, try ping using
332 * IPv6 address */
334 if (rval != QLA_SUCCESS) {
335 options &= ~PING_IPV6_LINKLOCAL_ADDR;
336 if (iface_num == 0) {
337 options |= PING_IPV6_ADDR0;
338 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
339 "Ping src: %pI6 "
340 "dest: %pI6\n", __func__,
341 &ha->ip_config.ipv6_addr0,
342 ipaddr));
343 } else if (iface_num == 1) {
344 options |= PING_IPV6_ADDR1;
345 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
346 "Ping src: %pI6 "
347 "dest: %pI6\n", __func__,
348 &ha->ip_config.ipv6_addr1,
349 ipaddr));
351 rval = qla4xxx_ping_iocb(ha, options, payload_size,
352 pid, ipaddr);
353 if (rval)
354 rval = -EINVAL;
356 } else
357 rval = -ENOSYS;
358 exit_send_ping:
359 return rval;
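/*
 * qla4_attr_is_visible - sysfs visibility for iSCSI transport attributes
 *
 * Return S_IRUGO for the host, session, network, interface and flash-node
 * parameters this driver can report, and 0 (hidden) for everything else.
 */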
362 static umode_t qla4_attr_is_visible(int param_type, int param)
364 switch (param_type) {
365 case ISCSI_HOST_PARAM:
366 switch (param) {
367 case ISCSI_HOST_PARAM_HWADDRESS:
368 case ISCSI_HOST_PARAM_IPADDRESS:
369 case ISCSI_HOST_PARAM_INITIATOR_NAME:
370 case ISCSI_HOST_PARAM_PORT_STATE:
371 case ISCSI_HOST_PARAM_PORT_SPEED:
372 return S_IRUGO;
373 default:
374 return 0;
376 case ISCSI_PARAM:
377 switch (param) {
378 case ISCSI_PARAM_PERSISTENT_ADDRESS:
379 case ISCSI_PARAM_PERSISTENT_PORT:
380 case ISCSI_PARAM_CONN_ADDRESS:
381 case ISCSI_PARAM_CONN_PORT:
382 case ISCSI_PARAM_TARGET_NAME:
383 case ISCSI_PARAM_TPGT:
384 case ISCSI_PARAM_TARGET_ALIAS:
385 case ISCSI_PARAM_MAX_BURST:
386 case ISCSI_PARAM_MAX_R2T:
387 case ISCSI_PARAM_FIRST_BURST:
388 case ISCSI_PARAM_MAX_RECV_DLENGTH:
389 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
390 case ISCSI_PARAM_IFACE_NAME:
391 case ISCSI_PARAM_CHAP_OUT_IDX:
392 case ISCSI_PARAM_CHAP_IN_IDX:
393 case ISCSI_PARAM_USERNAME:
394 case ISCSI_PARAM_PASSWORD:
395 case ISCSI_PARAM_USERNAME_IN:
396 case ISCSI_PARAM_PASSWORD_IN:
397 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
398 case ISCSI_PARAM_DISCOVERY_SESS:
399 case ISCSI_PARAM_PORTAL_TYPE:
400 case ISCSI_PARAM_CHAP_AUTH_EN:
401 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
402 case ISCSI_PARAM_BIDI_CHAP_EN:
403 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
404 case ISCSI_PARAM_DEF_TIME2WAIT:
405 case ISCSI_PARAM_DEF_TIME2RETAIN:
406 case ISCSI_PARAM_HDRDGST_EN:
407 case ISCSI_PARAM_DATADGST_EN:
408 case ISCSI_PARAM_INITIAL_R2T_EN:
409 case ISCSI_PARAM_IMM_DATA_EN:
410 case ISCSI_PARAM_PDU_INORDER_EN:
411 case ISCSI_PARAM_DATASEQ_INORDER_EN:
412 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
413 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
414 case ISCSI_PARAM_TCP_WSF_DISABLE:
415 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
416 case ISCSI_PARAM_TCP_TIMER_SCALE:
417 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
418 case ISCSI_PARAM_TCP_XMIT_WSF:
419 case ISCSI_PARAM_TCP_RECV_WSF:
420 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
421 case ISCSI_PARAM_IPV4_TOS:
422 case ISCSI_PARAM_IPV6_TC:
423 case ISCSI_PARAM_IPV6_FLOW_LABEL:
424 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
425 case ISCSI_PARAM_KEEPALIVE_TMO:
426 case ISCSI_PARAM_LOCAL_PORT:
427 case ISCSI_PARAM_ISID:
428 case ISCSI_PARAM_TSID:
429 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
430 case ISCSI_PARAM_ERL:
431 case ISCSI_PARAM_STATSN:
432 case ISCSI_PARAM_EXP_STATSN:
433 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
434 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
435 case ISCSI_PARAM_LOCAL_IPADDR:
436 return S_IRUGO;
437 default:
438 return 0;
440 case ISCSI_NET_PARAM:
441 switch (param) {
442 case ISCSI_NET_PARAM_IPV4_ADDR:
443 case ISCSI_NET_PARAM_IPV4_SUBNET:
444 case ISCSI_NET_PARAM_IPV4_GW:
445 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
446 case ISCSI_NET_PARAM_IFACE_ENABLE:
447 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
448 case ISCSI_NET_PARAM_IPV6_ADDR:
449 case ISCSI_NET_PARAM_IPV6_ROUTER:
450 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
451 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
452 case ISCSI_NET_PARAM_VLAN_ID:
453 case ISCSI_NET_PARAM_VLAN_PRIORITY:
454 case ISCSI_NET_PARAM_VLAN_ENABLED:
455 case ISCSI_NET_PARAM_MTU:
456 case ISCSI_NET_PARAM_PORT:
457 case ISCSI_NET_PARAM_IPADDR_STATE:
458 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
459 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
460 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
461 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
462 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
463 case ISCSI_NET_PARAM_TCP_WSF:
464 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
465 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
466 case ISCSI_NET_PARAM_CACHE_ID:
467 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
468 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
469 case ISCSI_NET_PARAM_IPV4_TOS_EN:
470 case ISCSI_NET_PARAM_IPV4_TOS:
471 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
472 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
473 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
474 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
475 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
476 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
477 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
478 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
479 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
480 case ISCSI_NET_PARAM_REDIRECT_EN:
481 case ISCSI_NET_PARAM_IPV4_TTL:
482 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
483 case ISCSI_NET_PARAM_IPV6_MLD_EN:
484 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
485 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
486 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
487 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
488 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
489 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
490 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
491 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
492 return S_IRUGO;
493 default:
494 return 0;
496 case ISCSI_IFACE_PARAM:
497 switch (param) {
498 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
499 case ISCSI_IFACE_PARAM_HDRDGST_EN:
500 case ISCSI_IFACE_PARAM_DATADGST_EN:
501 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
502 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
503 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
504 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
505 case ISCSI_IFACE_PARAM_ERL:
506 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
507 case ISCSI_IFACE_PARAM_FIRST_BURST:
508 case ISCSI_IFACE_PARAM_MAX_R2T:
509 case ISCSI_IFACE_PARAM_MAX_BURST:
510 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
511 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
512 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
513 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
514 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
515 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
516 return S_IRUGO;
517 default:
518 return 0;
520 case ISCSI_FLASHNODE_PARAM:
521 switch (param) {
522 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
523 case ISCSI_FLASHNODE_PORTAL_TYPE:
524 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
525 case ISCSI_FLASHNODE_DISCOVERY_SESS:
526 case ISCSI_FLASHNODE_ENTRY_EN:
527 case ISCSI_FLASHNODE_HDR_DGST_EN:
528 case ISCSI_FLASHNODE_DATA_DGST_EN:
529 case ISCSI_FLASHNODE_IMM_DATA_EN:
530 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
531 case ISCSI_FLASHNODE_DATASEQ_INORDER:
532 case ISCSI_FLASHNODE_PDU_INORDER:
533 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
534 case ISCSI_FLASHNODE_SNACK_REQ_EN:
535 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
536 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
537 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
538 case ISCSI_FLASHNODE_ERL:
539 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
540 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
541 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
542 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
543 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
544 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
545 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
546 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
547 case ISCSI_FLASHNODE_FIRST_BURST:
548 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
549 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
550 case ISCSI_FLASHNODE_MAX_R2T:
551 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
552 case ISCSI_FLASHNODE_ISID:
553 case ISCSI_FLASHNODE_TSID:
554 case ISCSI_FLASHNODE_PORT:
555 case ISCSI_FLASHNODE_MAX_BURST:
556 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
557 case ISCSI_FLASHNODE_IPADDR:
558 case ISCSI_FLASHNODE_ALIAS:
559 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
560 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
561 case ISCSI_FLASHNODE_LOCAL_PORT:
562 case ISCSI_FLASHNODE_IPV4_TOS:
563 case ISCSI_FLASHNODE_IPV6_TC:
564 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
565 case ISCSI_FLASHNODE_NAME:
566 case ISCSI_FLASHNODE_TPGT:
567 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
568 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
569 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
570 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
571 case ISCSI_FLASHNODE_TCP_RECV_WSF:
572 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
573 case ISCSI_FLASHNODE_USERNAME:
574 case ISCSI_FLASHNODE_PASSWORD:
575 case ISCSI_FLASHNODE_STATSN:
576 case ISCSI_FLASHNODE_EXP_STATSN:
577 case ISCSI_FLASHNODE_IS_BOOT_TGT:
578 return S_IRUGO;
579 default:
580 return 0;
584 return 0;
588 * qla4xxx_create_chap_list - Create CHAP list from FLASH
589 * @ha: pointer to adapter structure
591 * Read flash and build a list of CHAP entries. During login, when a CHAP
592 * entry is received, it is looked up in this list. If the entry exists, its
593 * CHAP index is set in the DDB. If the entry does not exist, a new entry is
594 * added to the CHAP table in FLASH and the resulting index is used in
595 * the DDB.
597 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
599 int rval = 0;
600 uint8_t *chap_flash_data = NULL;
601 uint32_t offset;
602 dma_addr_t chap_dma;
603 uint32_t chap_size = 0;
605 if (is_qla40XX(ha))
606 chap_size = MAX_CHAP_ENTRIES_40XX *
607 sizeof(struct ql4_chap_table);
608 else /* Single region contains CHAP info for both
609 * ports which is divided into half for each port. */
611 chap_size = ha->hw.flt_chap_size / 2;
613 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
614 &chap_dma, GFP_KERNEL);
615 if (!chap_flash_data) {
616 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
617 return;
620 if (is_qla40XX(ha)) {
621 offset = FLASH_CHAP_OFFSET;
622 } else {
623 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
624 if (ha->port_num == 1)
625 offset += chap_size;
628 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
629 if (rval != QLA_SUCCESS)
630 goto exit_chap_list;
632 if (ha->chap_list == NULL)
633 ha->chap_list = vmalloc(chap_size);
634 if (ha->chap_list == NULL) {
635 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
636 goto exit_chap_list;
639 memset(ha->chap_list, 0, chap_size);
640 memcpy(ha->chap_list, chap_flash_data, chap_size);
642 exit_chap_list:
643 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
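/*
 * qla4xxx_get_chap_by_index - look up an entry in the cached CHAP table
 *
 * On success *chap_entry points at the table entry for @chap_index and
 * QLA_SUCCESS is returned; QLA_ERROR is returned if the cache is not
 * populated, the index is out of range, or the entry has no valid cookie.
 */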
646 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
647 int16_t chap_index,
648 struct ql4_chap_table **chap_entry)
650 int rval = QLA_ERROR;
651 int max_chap_entries;
653 if (!ha->chap_list) {
654 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
655 rval = QLA_ERROR;
656 goto exit_get_chap;
659 if (is_qla80XX(ha))
660 max_chap_entries = (ha->hw.flt_chap_size / 2) /
661 sizeof(struct ql4_chap_table);
662 else
663 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
665 if (chap_index > max_chap_entries) {
666 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
667 rval = QLA_ERROR;
668 goto exit_get_chap;
671 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
672 if ((*chap_entry)->cookie !=
673 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
674 rval = QLA_ERROR;
675 *chap_entry = NULL;
676 } else {
677 rval = QLA_SUCCESS;
680 exit_get_chap:
681 return rval;
685 * qla4xxx_find_free_chap_index - Find the first free chap index
686 * @ha: pointer to adapter structure
687 * @chap_index: CHAP index to be returned
689 * Find the first free chap index available in the chap table
691 * Note: Caller should acquire the chap lock before getting here.
693 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
694 uint16_t *chap_index)
696 int i, rval;
697 int free_index = -1;
698 int max_chap_entries = 0;
699 struct ql4_chap_table *chap_table;
701 if (is_qla80XX(ha))
702 max_chap_entries = (ha->hw.flt_chap_size / 2) /
703 sizeof(struct ql4_chap_table);
704 else
705 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
707 if (!ha->chap_list) {
708 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
709 rval = QLA_ERROR;
710 goto exit_find_chap;
713 for (i = 0; i < max_chap_entries; i++) {
714 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
716 if ((chap_table->cookie !=
717 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
718 (i > MAX_RESRV_CHAP_IDX)) {
719 free_index = i;
720 break;
724 if (free_index != -1) {
725 *chap_index = free_index;
726 rval = QLA_SUCCESS;
727 } else {
728 rval = QLA_ERROR;
731 exit_find_chap:
732 return rval;
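/*
 * qla4xxx_get_chap_list - report valid CHAP entries to the iSCSI transport
 *
 * Refresh the CHAP cache from FLASH and copy entries with a valid cookie,
 * starting at @chap_tbl_idx, into the iscsi_chap_rec array in @buf.  At most
 * *num_entries records are copied and *num_entries is updated with the
 * number actually returned.
 */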
735 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
736 uint32_t *num_entries, char *buf)
738 struct scsi_qla_host *ha = to_qla_host(shost);
739 struct ql4_chap_table *chap_table;
740 struct iscsi_chap_rec *chap_rec;
741 int max_chap_entries = 0;
742 int valid_chap_entries = 0;
743 int ret = 0, i;
745 if (is_qla80XX(ha))
746 max_chap_entries = (ha->hw.flt_chap_size / 2) /
747 sizeof(struct ql4_chap_table);
748 else
749 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
751 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
752 __func__, *num_entries, chap_tbl_idx);
754 if (!buf) {
755 ret = -ENOMEM;
756 goto exit_get_chap_list;
759 qla4xxx_create_chap_list(ha);
761 chap_rec = (struct iscsi_chap_rec *) buf;
762 mutex_lock(&ha->chap_sem);
763 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
764 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
765 if (chap_table->cookie !=
766 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
767 continue;
769 chap_rec->chap_tbl_idx = i;
770 strlcpy(chap_rec->username, chap_table->name,
771 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
772 strlcpy(chap_rec->password, chap_table->secret,
773 QL4_CHAP_MAX_SECRET_LEN);
774 chap_rec->password_length = chap_table->secret_len;
776 if (chap_table->flags & BIT_7) /* local */
777 chap_rec->chap_type = CHAP_TYPE_OUT;
779 if (chap_table->flags & BIT_6) /* peer */
780 chap_rec->chap_type = CHAP_TYPE_IN;
782 chap_rec++;
784 valid_chap_entries++;
785 if (valid_chap_entries == *num_entries)
786 break;
787 else
788 continue;
790 mutex_unlock(&ha->chap_sem);
792 exit_get_chap_list:
793 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
794 __func__, valid_chap_entries);
795 *num_entries = valid_chap_entries;
796 return ret;
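/*
 * __qla4xxx_is_chap_active - device_for_each_child() callback
 *
 * Return 1 if @dev is a ready iSCSI session whose DDB references the CHAP
 * table index passed in @data, 0 otherwise.
 */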
799 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
801 int ret = 0;
802 uint16_t *chap_tbl_idx = (uint16_t *) data;
803 struct iscsi_cls_session *cls_session;
804 struct iscsi_session *sess;
805 struct ddb_entry *ddb_entry;
807 if (!iscsi_is_session_dev(dev))
808 goto exit_is_chap_active;
810 cls_session = iscsi_dev_to_session(dev);
811 sess = cls_session->dd_data;
812 ddb_entry = sess->dd_data;
814 if (iscsi_session_chkready(cls_session))
815 goto exit_is_chap_active;
817 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
818 ret = 1;
820 exit_is_chap_active:
821 return ret;
824 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
825 uint16_t chap_tbl_idx)
827 int ret = 0;
829 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
830 __qla4xxx_is_chap_active);
832 return ret;
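/*
 * qla4xxx_delete_chap - invalidate a CHAP entry in FLASH
 *
 * Refuses with -EBUSY if any active session still uses @chap_tbl_idx.
 * Otherwise the entry's cookie is cleared, written back to FLASH, and the
 * in-memory CHAP cache is updated to match.
 */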
835 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
837 struct scsi_qla_host *ha = to_qla_host(shost);
838 struct ql4_chap_table *chap_table;
839 dma_addr_t chap_dma;
840 int max_chap_entries = 0;
841 uint32_t offset = 0;
842 uint32_t chap_size;
843 int ret = 0;
845 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
846 if (chap_table == NULL)
847 return -ENOMEM;
849 if (is_qla80XX(ha))
850 max_chap_entries = (ha->hw.flt_chap_size / 2) /
851 sizeof(struct ql4_chap_table);
852 else
853 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
855 if (chap_tbl_idx > max_chap_entries) {
856 ret = -EINVAL;
857 goto exit_delete_chap;
860 /* Check if chap index is in use.
861 * If chap is in use don't delete chap entry */
862 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
863 if (ret) {
864 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
865 "delete from flash\n", chap_tbl_idx);
866 ret = -EBUSY;
867 goto exit_delete_chap;
870 chap_size = sizeof(struct ql4_chap_table);
871 if (is_qla40XX(ha))
872 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
873 else {
874 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
875 /* flt_chap_size is CHAP table size for both ports
876 * so divide it by 2 to calculate the offset for second port */
878 if (ha->port_num == 1)
879 offset += (ha->hw.flt_chap_size / 2);
880 offset += (chap_tbl_idx * chap_size);
883 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
884 if (ret != QLA_SUCCESS) {
885 ret = -EINVAL;
886 goto exit_delete_chap;
889 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
890 __le16_to_cpu(chap_table->cookie)));
892 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
893 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
894 goto exit_delete_chap;
897 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
899 offset = FLASH_CHAP_OFFSET |
900 (chap_tbl_idx * sizeof(struct ql4_chap_table));
901 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
902 FLASH_OPT_RMW_COMMIT);
903 if (ret == QLA_SUCCESS && ha->chap_list) {
904 mutex_lock(&ha->chap_sem);
905 /* Update ha chap_list cache */
906 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
907 chap_table, sizeof(struct ql4_chap_table));
908 mutex_unlock(&ha->chap_sem);
910 if (ret != QLA_SUCCESS)
911 ret = -EINVAL;
913 exit_delete_chap:
914 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
915 return ret;
919 * qla4xxx_set_chap_entry - Make chap entry with given information
920 * @shost: pointer to host
921 * @data: chap info - credentials, index and type to make chap entry
922 * @len: length of data
924 * Add or update chap entry with the given information
926 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
928 struct scsi_qla_host *ha = to_qla_host(shost);
929 struct iscsi_chap_rec chap_rec;
930 struct ql4_chap_table *chap_entry = NULL;
931 struct iscsi_param_info *param_info;
932 struct nlattr *attr;
933 int max_chap_entries = 0;
934 int type;
935 int rem = len;
936 int rc = 0;
937 int size;
939 memset(&chap_rec, 0, sizeof(chap_rec));
941 nla_for_each_attr(attr, data, len, rem) {
942 param_info = nla_data(attr);
944 switch (param_info->param) {
945 case ISCSI_CHAP_PARAM_INDEX:
946 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
947 break;
948 case ISCSI_CHAP_PARAM_CHAP_TYPE:
949 chap_rec.chap_type = param_info->value[0];
950 break;
951 case ISCSI_CHAP_PARAM_USERNAME:
952 size = min_t(size_t, sizeof(chap_rec.username),
953 param_info->len);
954 memcpy(chap_rec.username, param_info->value, size);
955 break;
956 case ISCSI_CHAP_PARAM_PASSWORD:
957 size = min_t(size_t, sizeof(chap_rec.password),
958 param_info->len);
959 memcpy(chap_rec.password, param_info->value, size);
960 break;
961 case ISCSI_CHAP_PARAM_PASSWORD_LEN:
962 chap_rec.password_length = param_info->value[0];
963 break;
964 default:
965 ql4_printk(KERN_ERR, ha,
966 "%s: No such sysfs attribute\n", __func__);
967 rc = -ENOSYS;
968 goto exit_set_chap;
972 if (chap_rec.chap_type == CHAP_TYPE_IN)
973 type = BIDI_CHAP;
974 else
975 type = LOCAL_CHAP;
977 if (is_qla80XX(ha))
978 max_chap_entries = (ha->hw.flt_chap_size / 2) /
979 sizeof(struct ql4_chap_table);
980 else
981 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
983 mutex_lock(&ha->chap_sem);
984 if (chap_rec.chap_tbl_idx < max_chap_entries) {
985 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
986 &chap_entry);
987 if (!rc) {
988 if (!(type == qla4xxx_get_chap_type(chap_entry))) {
989 ql4_printk(KERN_INFO, ha,
990 "Type mismatch for CHAP entry %d\n",
991 chap_rec.chap_tbl_idx);
992 rc = -EINVAL;
993 goto exit_unlock_chap;
996 /* If chap index is in use then don't modify it */
997 rc = qla4xxx_is_chap_active(shost,
998 chap_rec.chap_tbl_idx);
999 if (rc) {
1000 ql4_printk(KERN_INFO, ha,
1001 "CHAP entry %d is in use\n",
1002 chap_rec.chap_tbl_idx);
1003 rc = -EBUSY;
1004 goto exit_unlock_chap;
1007 } else {
1008 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
1009 if (rc) {
1010 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
1011 rc = -EBUSY;
1012 goto exit_unlock_chap;
1016 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
1017 chap_rec.chap_tbl_idx, type);
1019 exit_unlock_chap:
1020 mutex_unlock(&ha->chap_sem);
1022 exit_set_chap:
1023 return rc;
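/*
 * qla4xxx_get_host_stats - return MAC/IP/TCP/iSCSI statistics for the host
 *
 * Fetch the firmware statistics block with qla4xxx_get_mgmt_data() and
 * translate it into the iscsi_offload_host_stats structure expected by the
 * iSCSI transport.  @len must match sizeof(struct iscsi_offload_host_stats).
 */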
1027 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
1029 struct scsi_qla_host *ha = to_qla_host(shost);
1030 struct iscsi_offload_host_stats *host_stats = NULL;
1031 int host_stats_size;
1032 int ret = 0;
1033 int ddb_idx = 0;
1034 struct ql_iscsi_stats *ql_iscsi_stats = NULL;
1035 int stats_size;
1036 dma_addr_t iscsi_stats_dma;
1038 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
1040 host_stats_size = sizeof(struct iscsi_offload_host_stats);
1042 if (host_stats_size != len) {
1043 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
1044 __func__, len, host_stats_size);
1045 ret = -EINVAL;
1046 goto exit_host_stats;
1048 host_stats = (struct iscsi_offload_host_stats *)buf;
1050 if (!buf) {
1051 ret = -ENOMEM;
1052 goto exit_host_stats;
1055 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1057 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1058 &iscsi_stats_dma, GFP_KERNEL);
1059 if (!ql_iscsi_stats) {
1060 ql4_printk(KERN_ERR, ha,
1061 "Unable to allocate memory for iscsi stats\n");
1062 ret = -ENOMEM;
1063 goto exit_host_stats;
1066 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
1067 iscsi_stats_dma);
1068 if (ret != QLA_SUCCESS) {
1069 ql4_printk(KERN_ERR, ha,
1070 "Unable to retrieve iscsi stats\n");
1071 ret = -EIO;
1072 goto exit_host_stats;
1074 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
1075 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
1076 host_stats->mactx_multicast_frames =
1077 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
1078 host_stats->mactx_broadcast_frames =
1079 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
1080 host_stats->mactx_pause_frames =
1081 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
1082 host_stats->mactx_control_frames =
1083 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
1084 host_stats->mactx_deferral =
1085 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
1086 host_stats->mactx_excess_deferral =
1087 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
1088 host_stats->mactx_late_collision =
1089 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
1090 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
1091 host_stats->mactx_single_collision =
1092 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
1093 host_stats->mactx_multiple_collision =
1094 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
1095 host_stats->mactx_collision =
1096 le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
1097 host_stats->mactx_frames_dropped =
1098 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
1099 host_stats->mactx_jumbo_frames =
1100 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
1101 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
1102 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
1103 host_stats->macrx_unknown_control_frames =
1104 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
1105 host_stats->macrx_pause_frames =
1106 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
1107 host_stats->macrx_control_frames =
1108 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
1109 host_stats->macrx_dribble =
1110 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
1111 host_stats->macrx_frame_length_error =
1112 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
1113 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
1114 host_stats->macrx_carrier_sense_error =
1115 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
1116 host_stats->macrx_frame_discarded =
1117 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
1118 host_stats->macrx_frames_dropped =
1119 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
1120 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
1121 host_stats->mac_encoding_error =
1122 le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
1123 host_stats->macrx_length_error_large =
1124 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
1125 host_stats->macrx_length_error_small =
1126 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
1127 host_stats->macrx_multicast_frames =
1128 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
1129 host_stats->macrx_broadcast_frames =
1130 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
1131 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
1132 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
1133 host_stats->iptx_fragments =
1134 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
1135 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
1136 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
1137 host_stats->iprx_fragments =
1138 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
1139 host_stats->ip_datagram_reassembly =
1140 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
1141 host_stats->ip_invalid_address_error =
1142 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
1143 host_stats->ip_error_packets =
1144 le64_to_cpu(ql_iscsi_stats->ip_error_packets);
1145 host_stats->ip_fragrx_overlap =
1146 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
1147 host_stats->ip_fragrx_outoforder =
1148 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
1149 host_stats->ip_datagram_reassembly_timeout =
1150 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
1151 host_stats->ipv6tx_packets =
1152 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
1153 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
1154 host_stats->ipv6tx_fragments =
1155 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
1156 host_stats->ipv6rx_packets =
1157 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
1158 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
1159 host_stats->ipv6rx_fragments =
1160 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
1161 host_stats->ipv6_datagram_reassembly =
1162 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
1163 host_stats->ipv6_invalid_address_error =
1164 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
1165 host_stats->ipv6_error_packets =
1166 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
1167 host_stats->ipv6_fragrx_overlap =
1168 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
1169 host_stats->ipv6_fragrx_outoforder =
1170 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
1171 host_stats->ipv6_datagram_reassembly_timeout =
1172 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
1173 host_stats->tcptx_segments =
1174 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
1175 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
1176 host_stats->tcprx_segments =
1177 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
1178 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
1179 host_stats->tcp_duplicate_ack_retx =
1180 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
1181 host_stats->tcp_retx_timer_expired =
1182 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
1183 host_stats->tcprx_duplicate_ack =
1184 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
1185 host_stats->tcprx_pure_ackr =
1186 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
1187 host_stats->tcptx_delayed_ack =
1188 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
1189 host_stats->tcptx_pure_ack =
1190 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
1191 host_stats->tcprx_segment_error =
1192 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
1193 host_stats->tcprx_segment_outoforder =
1194 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
1195 host_stats->tcprx_window_probe =
1196 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
1197 host_stats->tcprx_window_update =
1198 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
1199 host_stats->tcptx_window_probe_persist =
1200 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
1201 host_stats->ecc_error_correction =
1202 le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
1203 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
1204 host_stats->iscsi_data_bytes_tx =
1205 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
1206 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
1207 host_stats->iscsi_data_bytes_rx =
1208 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
1209 host_stats->iscsi_io_completed =
1210 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
1211 host_stats->iscsi_unexpected_io_rx =
1212 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
1213 host_stats->iscsi_format_error =
1214 le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
1215 host_stats->iscsi_hdr_digest_error =
1216 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
1217 host_stats->iscsi_data_digest_error =
1218 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
1219 host_stats->iscsi_sequence_error =
1220 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
1221 exit_host_stats:
1222 if (ql_iscsi_stats)
1223 dma_free_coherent(&ha->pdev->dev, stats_size,
1224 ql_iscsi_stats, iscsi_stats_dma);
1226 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
1227 __func__);
1228 return ret;
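/*
 * qla4xxx_get_iface_param - report a network or iSCSI interface parameter
 *
 * Format the requested ISCSI_NET_PARAM or ISCSI_IFACE_PARAM value from the
 * cached ha->ip_config into @buf and return the number of bytes written,
 * or -ENOSYS for parameters this driver does not implement.
 */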
1231 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
1232 enum iscsi_param_type param_type,
1233 int param, char *buf)
1235 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
1236 struct scsi_qla_host *ha = to_qla_host(shost);
1237 int ival;
1238 char *pval = NULL;
1239 int len = -ENOSYS;
1241 if (param_type == ISCSI_NET_PARAM) {
1242 switch (param) {
1243 case ISCSI_NET_PARAM_IPV4_ADDR:
1244 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1245 break;
1246 case ISCSI_NET_PARAM_IPV4_SUBNET:
1247 len = sprintf(buf, "%pI4\n",
1248 &ha->ip_config.subnet_mask);
1249 break;
1250 case ISCSI_NET_PARAM_IPV4_GW:
1251 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
1252 break;
1253 case ISCSI_NET_PARAM_IFACE_ENABLE:
1254 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1255 OP_STATE(ha->ip_config.ipv4_options,
1256 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
1257 } else {
1258 OP_STATE(ha->ip_config.ipv6_options,
1259 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
1262 len = sprintf(buf, "%s\n", pval);
1263 break;
1264 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1265 len = sprintf(buf, "%s\n",
1266 (ha->ip_config.tcp_options &
1267 TCPOPT_DHCP_ENABLE) ?
1268 "dhcp" : "static");
1269 break;
1270 case ISCSI_NET_PARAM_IPV6_ADDR:
1271 if (iface->iface_num == 0)
1272 len = sprintf(buf, "%pI6\n",
1273 &ha->ip_config.ipv6_addr0);
1274 if (iface->iface_num == 1)
1275 len = sprintf(buf, "%pI6\n",
1276 &ha->ip_config.ipv6_addr1);
1277 break;
1278 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1279 len = sprintf(buf, "%pI6\n",
1280 &ha->ip_config.ipv6_link_local_addr);
1281 break;
1282 case ISCSI_NET_PARAM_IPV6_ROUTER:
1283 len = sprintf(buf, "%pI6\n",
1284 &ha->ip_config.ipv6_default_router_addr);
1285 break;
1286 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1287 pval = (ha->ip_config.ipv6_addl_options &
1288 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
1289 "nd" : "static";
1291 len = sprintf(buf, "%s\n", pval);
1292 break;
1293 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1294 pval = (ha->ip_config.ipv6_addl_options &
1295 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
1296 "auto" : "static";
1298 len = sprintf(buf, "%s\n", pval);
1299 break;
1300 case ISCSI_NET_PARAM_VLAN_ID:
1301 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1302 ival = ha->ip_config.ipv4_vlan_tag &
1303 ISCSI_MAX_VLAN_ID;
1304 else
1305 ival = ha->ip_config.ipv6_vlan_tag &
1306 ISCSI_MAX_VLAN_ID;
1308 len = sprintf(buf, "%d\n", ival);
1309 break;
1310 case ISCSI_NET_PARAM_VLAN_PRIORITY:
1311 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1312 ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
1313 ISCSI_MAX_VLAN_PRIORITY;
1314 else
1315 ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
1316 ISCSI_MAX_VLAN_PRIORITY;
1318 len = sprintf(buf, "%d\n", ival);
1319 break;
1320 case ISCSI_NET_PARAM_VLAN_ENABLED:
1321 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1322 OP_STATE(ha->ip_config.ipv4_options,
1323 IPOPT_VLAN_TAGGING_ENABLE, pval);
1324 } else {
1325 OP_STATE(ha->ip_config.ipv6_options,
1326 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
1328 len = sprintf(buf, "%s\n", pval);
1329 break;
1330 case ISCSI_NET_PARAM_MTU:
1331 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
1332 break;
1333 case ISCSI_NET_PARAM_PORT:
1334 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1335 len = sprintf(buf, "%d\n",
1336 ha->ip_config.ipv4_port);
1337 else
1338 len = sprintf(buf, "%d\n",
1339 ha->ip_config.ipv6_port);
1340 break;
1341 case ISCSI_NET_PARAM_IPADDR_STATE:
1342 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1343 pval = iscsi_get_ipaddress_state_name(
1344 ha->ip_config.ipv4_addr_state);
1345 } else {
1346 if (iface->iface_num == 0)
1347 pval = iscsi_get_ipaddress_state_name(
1348 ha->ip_config.ipv6_addr0_state);
1349 else if (iface->iface_num == 1)
1350 pval = iscsi_get_ipaddress_state_name(
1351 ha->ip_config.ipv6_addr1_state);
1354 len = sprintf(buf, "%s\n", pval);
1355 break;
1356 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
1357 pval = iscsi_get_ipaddress_state_name(
1358 ha->ip_config.ipv6_link_local_state);
1359 len = sprintf(buf, "%s\n", pval);
1360 break;
1361 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
1362 pval = iscsi_get_router_state_name(
1363 ha->ip_config.ipv6_default_router_state);
1364 len = sprintf(buf, "%s\n", pval);
1365 break;
1366 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
1367 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1368 OP_STATE(~ha->ip_config.tcp_options,
1369 TCPOPT_DELAYED_ACK_DISABLE, pval);
1370 } else {
1371 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1372 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
1374 len = sprintf(buf, "%s\n", pval);
1375 break;
1376 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
1377 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1378 OP_STATE(~ha->ip_config.tcp_options,
1379 TCPOPT_NAGLE_ALGO_DISABLE, pval);
1380 } else {
1381 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1382 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
1384 len = sprintf(buf, "%s\n", pval);
1385 break;
1386 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
1387 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1388 OP_STATE(~ha->ip_config.tcp_options,
1389 TCPOPT_WINDOW_SCALE_DISABLE, pval);
1390 } else {
1391 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1392 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
1393 pval);
1395 len = sprintf(buf, "%s\n", pval);
1396 break;
1397 case ISCSI_NET_PARAM_TCP_WSF:
1398 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1399 len = sprintf(buf, "%d\n",
1400 ha->ip_config.tcp_wsf);
1401 else
1402 len = sprintf(buf, "%d\n",
1403 ha->ip_config.ipv6_tcp_wsf);
1404 break;
1405 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
1406 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1407 ival = (ha->ip_config.tcp_options &
1408 TCPOPT_TIMER_SCALE) >> 1;
1409 else
1410 ival = (ha->ip_config.ipv6_tcp_options &
1411 IPV6_TCPOPT_TIMER_SCALE) >> 1;
1413 len = sprintf(buf, "%d\n", ival);
1414 break;
1415 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
1416 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1417 OP_STATE(ha->ip_config.tcp_options,
1418 TCPOPT_TIMESTAMP_ENABLE, pval);
1419 } else {
1420 OP_STATE(ha->ip_config.ipv6_tcp_options,
1421 IPV6_TCPOPT_TIMESTAMP_EN, pval);
1423 len = sprintf(buf, "%s\n", pval);
1424 break;
1425 case ISCSI_NET_PARAM_CACHE_ID:
1426 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1427 len = sprintf(buf, "%d\n",
1428 ha->ip_config.ipv4_cache_id);
1429 else
1430 len = sprintf(buf, "%d\n",
1431 ha->ip_config.ipv6_cache_id);
1432 break;
1433 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
1434 OP_STATE(ha->ip_config.tcp_options,
1435 TCPOPT_DNS_SERVER_IP_EN, pval);
1437 len = sprintf(buf, "%s\n", pval);
1438 break;
1439 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
1440 OP_STATE(ha->ip_config.tcp_options,
1441 TCPOPT_SLP_DA_INFO_EN, pval);
1443 len = sprintf(buf, "%s\n", pval);
1444 break;
1445 case ISCSI_NET_PARAM_IPV4_TOS_EN:
1446 OP_STATE(ha->ip_config.ipv4_options,
1447 IPOPT_IPV4_TOS_EN, pval);
1449 len = sprintf(buf, "%s\n", pval);
1450 break;
1451 case ISCSI_NET_PARAM_IPV4_TOS:
1452 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
1453 break;
1454 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
1455 OP_STATE(ha->ip_config.ipv4_options,
1456 IPOPT_GRAT_ARP_EN, pval);
1458 len = sprintf(buf, "%s\n", pval);
1459 break;
1460 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
1461 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
1462 pval);
1464 len = sprintf(buf, "%s\n", pval);
1465 break;
1466 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
1467 pval = (ha->ip_config.ipv4_alt_cid_len) ?
1468 (char *)ha->ip_config.ipv4_alt_cid : "";
1470 len = sprintf(buf, "%s\n", pval);
1471 break;
1472 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
1473 OP_STATE(ha->ip_config.ipv4_options,
1474 IPOPT_REQ_VID_EN, pval);
1476 len = sprintf(buf, "%s\n", pval);
1477 break;
1478 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
1479 OP_STATE(ha->ip_config.ipv4_options,
1480 IPOPT_USE_VID_EN, pval);
1482 len = sprintf(buf, "%s\n", pval);
1483 break;
1484 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
1485 pval = (ha->ip_config.ipv4_vid_len) ?
1486 (char *)ha->ip_config.ipv4_vid : "";
1488 len = sprintf(buf, "%s\n", pval);
1489 break;
1490 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
1491 OP_STATE(ha->ip_config.ipv4_options,
1492 IPOPT_LEARN_IQN_EN, pval);
1494 len = sprintf(buf, "%s\n", pval);
1495 break;
1496 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
1497 OP_STATE(~ha->ip_config.ipv4_options,
1498 IPOPT_FRAGMENTATION_DISABLE, pval);
1500 len = sprintf(buf, "%s\n", pval);
1501 break;
1502 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
1503 OP_STATE(ha->ip_config.ipv4_options,
1504 IPOPT_IN_FORWARD_EN, pval);
1506 len = sprintf(buf, "%s\n", pval);
1507 break;
1508 case ISCSI_NET_PARAM_REDIRECT_EN:
1509 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1510 OP_STATE(ha->ip_config.ipv4_options,
1511 IPOPT_ARP_REDIRECT_EN, pval);
1512 } else {
1513 OP_STATE(ha->ip_config.ipv6_options,
1514 IPV6_OPT_REDIRECT_EN, pval);
1516 len = sprintf(buf, "%s\n", pval);
1517 break;
1518 case ISCSI_NET_PARAM_IPV4_TTL:
1519 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
1520 break;
1521 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
1522 OP_STATE(ha->ip_config.ipv6_options,
1523 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
1525 len = sprintf(buf, "%s\n", pval);
1526 break;
1527 case ISCSI_NET_PARAM_IPV6_MLD_EN:
1528 OP_STATE(ha->ip_config.ipv6_addl_options,
1529 IPV6_ADDOPT_MLD_EN, pval);
1531 len = sprintf(buf, "%s\n", pval);
1532 break;
1533 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
1534 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
1535 break;
1536 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
1537 len = sprintf(buf, "%d\n",
1538 ha->ip_config.ipv6_traffic_class);
1539 break;
1540 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
1541 len = sprintf(buf, "%d\n",
1542 ha->ip_config.ipv6_hop_limit);
1543 break;
1544 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
1545 len = sprintf(buf, "%d\n",
1546 ha->ip_config.ipv6_nd_reach_time);
1547 break;
1548 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
1549 len = sprintf(buf, "%d\n",
1550 ha->ip_config.ipv6_nd_rexmit_timer);
1551 break;
1552 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
1553 len = sprintf(buf, "%d\n",
1554 ha->ip_config.ipv6_nd_stale_timeout);
1555 break;
1556 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
1557 len = sprintf(buf, "%d\n",
1558 ha->ip_config.ipv6_dup_addr_detect_count);
1559 break;
1560 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
1561 len = sprintf(buf, "%d\n",
1562 ha->ip_config.ipv6_gw_advrt_mtu);
1563 break;
1564 default:
1565 len = -ENOSYS;
1567 } else if (param_type == ISCSI_IFACE_PARAM) {
1568 switch (param) {
1569 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
1570 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
1571 break;
1572 case ISCSI_IFACE_PARAM_HDRDGST_EN:
1573 OP_STATE(ha->ip_config.iscsi_options,
1574 ISCSIOPTS_HEADER_DIGEST_EN, pval);
1576 len = sprintf(buf, "%s\n", pval);
1577 break;
1578 case ISCSI_IFACE_PARAM_DATADGST_EN:
1579 OP_STATE(ha->ip_config.iscsi_options,
1580 ISCSIOPTS_DATA_DIGEST_EN, pval);
1582 len = sprintf(buf, "%s\n", pval);
1583 break;
1584 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
1585 OP_STATE(ha->ip_config.iscsi_options,
1586 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
1588 len = sprintf(buf, "%s\n", pval);
1589 break;
1590 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
1591 OP_STATE(ha->ip_config.iscsi_options,
1592 ISCSIOPTS_INITIAL_R2T_EN, pval);
1594 len = sprintf(buf, "%s\n", pval);
1595 break;
1596 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
1597 OP_STATE(ha->ip_config.iscsi_options,
1598 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
1600 len = sprintf(buf, "%s\n", pval);
1601 break;
1602 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
1603 OP_STATE(ha->ip_config.iscsi_options,
1604 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
1606 len = sprintf(buf, "%s\n", pval);
1607 break;
1608 case ISCSI_IFACE_PARAM_ERL:
1609 len = sprintf(buf, "%d\n",
1610 (ha->ip_config.iscsi_options &
1611 ISCSIOPTS_ERL));
1612 break;
1613 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
1614 len = sprintf(buf, "%u\n",
1615 ha->ip_config.iscsi_max_pdu_size *
1616 BYTE_UNITS);
1617 break;
1618 case ISCSI_IFACE_PARAM_FIRST_BURST:
1619 len = sprintf(buf, "%u\n",
1620 ha->ip_config.iscsi_first_burst_len *
1621 BYTE_UNITS);
1622 break;
1623 case ISCSI_IFACE_PARAM_MAX_R2T:
1624 len = sprintf(buf, "%d\n",
1625 ha->ip_config.iscsi_max_outstnd_r2t);
1626 break;
1627 case ISCSI_IFACE_PARAM_MAX_BURST:
1628 len = sprintf(buf, "%u\n",
1629 ha->ip_config.iscsi_max_burst_len *
1630 BYTE_UNITS);
1631 break;
1632 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
1633 OP_STATE(ha->ip_config.iscsi_options,
1634 ISCSIOPTS_CHAP_AUTH_EN, pval);
1636 len = sprintf(buf, "%s\n", pval);
1637 break;
1638 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
1639 OP_STATE(ha->ip_config.iscsi_options,
1640 ISCSIOPTS_BIDI_CHAP_EN, pval);
1642 len = sprintf(buf, "%s\n", pval);
1643 break;
1644 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
1645 OP_STATE(ha->ip_config.iscsi_options,
1646 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
1648 len = sprintf(buf, "%s\n", pval);
1649 break;
1650 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
1651 OP_STATE(ha->ip_config.iscsi_options,
1652 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
1654 len = sprintf(buf, "%s\n", pval);
1655 break;
1656 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
1657 OP_STATE(ha->ip_config.iscsi_options,
1658 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
1660 len = sprintf(buf, "%s\n", pval);
1661 break;
1662 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
1663 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
1664 break;
1665 default:
1666 len = -ENOSYS;
1670 return len;
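/*
 * qla4xxx_ep_connect - create an endpoint for an iSCSI connection
 *
 * Allocate an iscsi_endpoint, record the IPv4 or IPv6 destination address
 * and the owning Scsi_Host in its qla_endpoint private data, and return it.
 * Returns an ERR_PTR() on allocation failure or when @shost is NULL.
 */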
1673 static struct iscsi_endpoint *
1674 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1675 int non_blocking)
1677 int ret;
1678 struct iscsi_endpoint *ep;
1679 struct qla_endpoint *qla_ep;
1680 struct scsi_qla_host *ha;
1681 struct sockaddr_in *addr;
1682 struct sockaddr_in6 *addr6;
1684 if (!shost) {
1685 ret = -ENXIO;
1686 pr_err("%s: shost is NULL\n", __func__);
1687 return ERR_PTR(ret);
1690 ha = iscsi_host_priv(shost);
1691 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1692 if (!ep) {
1693 ret = -ENOMEM;
1694 return ERR_PTR(ret);
1697 qla_ep = ep->dd_data;
1698 memset(qla_ep, 0, sizeof(struct qla_endpoint));
1699 if (dst_addr->sa_family == AF_INET) {
1700 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1701 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1702 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1703 (char *)&addr->sin_addr));
1704 } else if (dst_addr->sa_family == AF_INET6) {
1705 memcpy(&qla_ep->dst_addr, dst_addr,
1706 sizeof(struct sockaddr_in6));
1707 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1708 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1709 (char *)&addr6->sin6_addr));
1710 } else {
1711 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
1712 __func__);
1715 qla_ep->host = shost;
1717 return ep;
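/*
 * qla4xxx_ep_poll - report whether the endpoint can be used yet
 *
 * Return 1 once the adapter is up and is not still building its DDB list,
 * 0 otherwise.
 */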
1720 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1722 struct qla_endpoint *qla_ep;
1723 struct scsi_qla_host *ha;
1724 int ret = 0;
1726 qla_ep = ep->dd_data;
1727 ha = to_qla_host(qla_ep->host);
1728 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
1730 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1731 ret = 1;
1733 return ret;
1736 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
1738 struct qla_endpoint *qla_ep;
1739 struct scsi_qla_host *ha;
1741 qla_ep = ep->dd_data;
1742 ha = to_qla_host(qla_ep->host);
1743 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1744 ha->host_no));
1745 iscsi_destroy_endpoint(ep);
1748 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1749 enum iscsi_param param,
1750 char *buf)
1752 struct qla_endpoint *qla_ep = ep->dd_data;
1753 struct sockaddr *dst_addr;
1754 struct scsi_qla_host *ha;
1756 if (!qla_ep)
1757 return -ENOTCONN;
1759 ha = to_qla_host(qla_ep->host);
1760 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1761 ha->host_no));
1763 switch (param) {
1764 case ISCSI_PARAM_CONN_PORT:
1765 case ISCSI_PARAM_CONN_ADDRESS:
1766 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1767 if (!dst_addr)
1768 return -ENOTCONN;
1770 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1771 &qla_ep->dst_addr, param, buf);
1772 default:
1773 return -ENOSYS;
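/*
 * qla4xxx_conn_get_stats - fetch per-connection iSCSI statistics.
 * A page-aligned coherent DMA buffer is allocated, filled by the
 * firmware via qla4xxx_get_mgmt_data(), and the little-endian counters
 * are then converted into the transport's struct iscsi_stats.
 */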
1777 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1778 struct iscsi_stats *stats)
1780 struct iscsi_session *sess;
1781 struct iscsi_cls_session *cls_sess;
1782 struct ddb_entry *ddb_entry;
1783 struct scsi_qla_host *ha;
1784 struct ql_iscsi_stats *ql_iscsi_stats;
1785 int stats_size;
1786 int ret;
1787 dma_addr_t iscsi_stats_dma;
1789 cls_sess = iscsi_conn_to_session(cls_conn);
1790 sess = cls_sess->dd_data;
1791 ddb_entry = sess->dd_data;
1792 ha = ddb_entry->ha;
1794 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1795 ha->host_no));
1796 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1797 /* Allocate memory */
1798 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1799 &iscsi_stats_dma, GFP_KERNEL);
1800 if (!ql_iscsi_stats) {
1801 ql4_printk(KERN_ERR, ha,
1802 "Unable to allocate memory for iscsi stats\n");
1803 goto exit_get_stats;
1806 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
1807 iscsi_stats_dma);
1808 if (ret != QLA_SUCCESS) {
1809 ql4_printk(KERN_ERR, ha,
1810 "Unable to retrieve iscsi stats\n");
1811 goto free_stats;
1814 /* octets */
1815 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
1816 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
1817 /* xmit pdus */
1818 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
1819 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
1820 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
1821 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
1822 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
1823 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
1824 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
1825 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
1826 /* recv pdus */
1827 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
1828 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
1829 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
1830 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
1831 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
1832 stats->logoutrsp_pdus =
1833 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
1834 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
1835 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
1836 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
1838 free_stats:
1839 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
1840 iscsi_stats_dma);
1841 exit_get_stats:
1842 return;
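/*
 * qla4xxx_eh_cmd_timed_out - if the session is currently in the FAILED
 * state, ask the block layer to restart the command timer instead of
 * escalating to SCSI error handling, giving session recovery a chance
 * to complete.
 */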
1845 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1847 struct iscsi_cls_session *session;
1848 struct iscsi_session *sess;
1849 unsigned long flags;
1850 enum blk_eh_timer_return ret = BLK_EH_DONE;
1852 session = starget_to_session(scsi_target(sc->device));
1853 sess = session->dd_data;
1855 spin_lock_irqsave(&session->lock, flags);
1856 if (session->state == ISCSI_SESSION_FAILED)
1857 ret = BLK_EH_RESET_TIMER;
1858 spin_unlock_irqrestore(&session->lock, flags);
1860 return ret;
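/*
 * qla4xxx_set_port_speed - refresh the firmware state and decode the
 * link speed from bits [11:8] of the additional firmware state word
 * (the 0x0F00 mask below) into the iscsi_cls_host port_speed field.
 */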
1863 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1865 struct scsi_qla_host *ha = to_qla_host(shost);
1866 struct iscsi_cls_host *ihost = shost->shost_data;
1867 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1869 qla4xxx_get_firmware_state(ha);
1871 switch (ha->addl_fw_state & 0x0F00) {
1872 case FW_ADDSTATE_LINK_SPEED_10MBPS:
1873 speed = ISCSI_PORT_SPEED_10MBPS;
1874 break;
1875 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1876 speed = ISCSI_PORT_SPEED_100MBPS;
1877 break;
1878 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1879 speed = ISCSI_PORT_SPEED_1GBPS;
1880 break;
1881 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1882 speed = ISCSI_PORT_SPEED_10GBPS;
1883 break;
1885 ihost->port_speed = speed;
1888 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1890 struct scsi_qla_host *ha = to_qla_host(shost);
1891 struct iscsi_cls_host *ihost = shost->shost_data;
1892 uint32_t state = ISCSI_PORT_STATE_DOWN;
1894 if (test_bit(AF_LINK_UP, &ha->flags))
1895 state = ISCSI_PORT_STATE_UP;
1897 ihost->port_state = state;
1900 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1901 enum iscsi_host_param param, char *buf)
1903 struct scsi_qla_host *ha = to_qla_host(shost);
1904 int len;
1906 switch (param) {
1907 case ISCSI_HOST_PARAM_HWADDRESS:
1908 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1909 break;
1910 case ISCSI_HOST_PARAM_IPADDRESS:
1911 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1912 break;
1913 case ISCSI_HOST_PARAM_INITIATOR_NAME:
1914 len = sprintf(buf, "%s\n", ha->name_string);
1915 break;
1916 case ISCSI_HOST_PARAM_PORT_STATE:
1917 qla4xxx_set_port_state(shost);
1918 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1919 break;
1920 case ISCSI_HOST_PARAM_PORT_SPEED:
1921 qla4xxx_set_port_speed(shost);
1922 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1923 break;
1924 default:
1925 return -ENOSYS;
1928 return len;
1931 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1933 if (ha->iface_ipv4)
1934 return;
1936 /* IPv4 */
1937 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1938 &qla4xxx_iscsi_transport,
1939 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1940 if (!ha->iface_ipv4)
1941 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1942 "iface0.\n");
1945 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1947 if (!ha->iface_ipv6_0)
1948 /* IPv6 iface-0 */
1949 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1950 &qla4xxx_iscsi_transport,
1951 ISCSI_IFACE_TYPE_IPV6, 0, 0);
1953 if (!ha->iface_ipv6_0)
1954 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1955 "iface0.\n");
1957 if (!ha->iface_ipv6_1)
1958 /* IPv6 iface-1 */
1959 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1960 &qla4xxx_iscsi_transport,
1961 ISCSI_IFACE_TYPE_IPV6, 1, 0);
1963 if (!ha->iface_ipv6_1)
1964 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1965 "iface1.\n");
1968 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1970 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1971 qla4xxx_create_ipv4_iface(ha);
1973 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1974 qla4xxx_create_ipv6_iface(ha);
1977 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1979 if (ha->iface_ipv4) {
1980 iscsi_destroy_iface(ha->iface_ipv4);
1981 ha->iface_ipv4 = NULL;
1985 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1987 if (ha->iface_ipv6_0) {
1988 iscsi_destroy_iface(ha->iface_ipv6_0);
1989 ha->iface_ipv6_0 = NULL;
1991 if (ha->iface_ipv6_1) {
1992 iscsi_destroy_iface(ha->iface_ipv6_1);
1993 ha->iface_ipv6_1 = NULL;
1997 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1999 qla4xxx_destroy_ipv4_iface(ha);
2000 qla4xxx_destroy_ipv6_iface(ha);
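/*
 * qla4xxx_set_ipv6 - apply a single IPv6 ISCSI_NET_PARAM attribute
 * from userspace to the in-memory address control block (init_fw_cb).
 * Per the comment below, the odd iface number only carries the second
 * IPv6 address; most of the remaining settings are applied only to the
 * even (0) iface number.
 */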
2003 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
2004 struct iscsi_iface_param_info *iface_param,
2005 struct addr_ctrl_blk *init_fw_cb)
2008 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
2009 * iface_num 1 is valid only for IPv6 Addr.
2011 switch (iface_param->param) {
2012 case ISCSI_NET_PARAM_IPV6_ADDR:
2013 if (iface_param->iface_num & 0x1)
2014 /* IPv6 Addr 1 */
2015 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
2016 sizeof(init_fw_cb->ipv6_addr1));
2017 else
2018 /* IPv6 Addr 0 */
2019 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
2020 sizeof(init_fw_cb->ipv6_addr0));
2021 break;
2022 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
2023 if (iface_param->iface_num & 0x1)
2024 break;
2025 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
2026 sizeof(init_fw_cb->ipv6_if_id));
2027 break;
2028 case ISCSI_NET_PARAM_IPV6_ROUTER:
2029 if (iface_param->iface_num & 0x1)
2030 break;
2031 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
2032 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2033 break;
2034 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
2035 /* Autocfg applies to even interface */
2036 if (iface_param->iface_num & 0x1)
2037 break;
2039 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
2040 init_fw_cb->ipv6_addtl_opts &=
2041 cpu_to_le16(
2042 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2043 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
2044 init_fw_cb->ipv6_addtl_opts |=
2045 cpu_to_le16(
2046 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2047 else
2048 ql4_printk(KERN_ERR, ha,
2049 "Invalid autocfg setting for IPv6 addr\n");
2050 break;
2051 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
2052 /* Autocfg applies to even interface */
2053 if (iface_param->iface_num & 0x1)
2054 break;
2056 if (iface_param->value[0] ==
2057 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
2058 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
2059 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2060 else if (iface_param->value[0] ==
2061 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
2062 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
2063 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2064 else
2065 ql4_printk(KERN_ERR, ha,
2066 "Invalid autocfg setting for IPv6 linklocal addr\n");
2067 break;
2068 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
2069 /* Autocfg applies to even interface */
2070 if (iface_param->iface_num & 0x1)
2071 break;
2073 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
2074 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
2075 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2076 break;
2077 case ISCSI_NET_PARAM_IFACE_ENABLE:
2078 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2079 init_fw_cb->ipv6_opts |=
2080 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
2081 qla4xxx_create_ipv6_iface(ha);
2082 } else {
2083 init_fw_cb->ipv6_opts &=
2084 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
2085 0xFFFF);
2086 qla4xxx_destroy_ipv6_iface(ha);
2088 break;
2089 case ISCSI_NET_PARAM_VLAN_TAG:
2090 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
2091 break;
2092 init_fw_cb->ipv6_vlan_tag =
2093 cpu_to_be16(*(uint16_t *)iface_param->value);
2094 break;
2095 case ISCSI_NET_PARAM_VLAN_ENABLED:
2096 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2097 init_fw_cb->ipv6_opts |=
2098 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
2099 else
2100 init_fw_cb->ipv6_opts &=
2101 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
2102 break;
2103 case ISCSI_NET_PARAM_MTU:
2104 init_fw_cb->eth_mtu_size =
2105 cpu_to_le16(*(uint16_t *)iface_param->value);
2106 break;
2107 case ISCSI_NET_PARAM_PORT:
2108 /* Autocfg applies to even interface */
2109 if (iface_param->iface_num & 0x1)
2110 break;
2112 init_fw_cb->ipv6_port =
2113 cpu_to_le16(*(uint16_t *)iface_param->value);
2114 break;
2115 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2116 if (iface_param->iface_num & 0x1)
2117 break;
2118 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2119 init_fw_cb->ipv6_tcp_opts |=
2120 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
2121 else
2122 init_fw_cb->ipv6_tcp_opts &=
2123 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
2124 0xFFFF);
2125 break;
2126 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2127 if (iface_param->iface_num & 0x1)
2128 break;
2129 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2130 init_fw_cb->ipv6_tcp_opts |=
2131 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2132 else
2133 init_fw_cb->ipv6_tcp_opts &=
2134 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2135 break;
2136 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2137 if (iface_param->iface_num & 0x1)
2138 break;
2139 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2140 init_fw_cb->ipv6_tcp_opts |=
2141 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2142 else
2143 init_fw_cb->ipv6_tcp_opts &=
2144 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2145 break;
2146 case ISCSI_NET_PARAM_TCP_WSF:
2147 if (iface_param->iface_num & 0x1)
2148 break;
2149 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
2150 break;
2151 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2152 if (iface_param->iface_num & 0x1)
2153 break;
2154 init_fw_cb->ipv6_tcp_opts &=
2155 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
2156 init_fw_cb->ipv6_tcp_opts |=
2157 cpu_to_le16((iface_param->value[0] << 1) &
2158 IPV6_TCPOPT_TIMER_SCALE);
2159 break;
2160 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2161 if (iface_param->iface_num & 0x1)
2162 break;
2163 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2164 init_fw_cb->ipv6_tcp_opts |=
2165 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
2166 else
2167 init_fw_cb->ipv6_tcp_opts &=
2168 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
2169 break;
2170 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
2171 if (iface_param->iface_num & 0x1)
2172 break;
2173 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2174 init_fw_cb->ipv6_opts |=
2175 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2176 else
2177 init_fw_cb->ipv6_opts &=
2178 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2179 break;
2180 case ISCSI_NET_PARAM_REDIRECT_EN:
2181 if (iface_param->iface_num & 0x1)
2182 break;
2183 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2184 init_fw_cb->ipv6_opts |=
2185 cpu_to_le16(IPV6_OPT_REDIRECT_EN);
2186 else
2187 init_fw_cb->ipv6_opts &=
2188 cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
2189 break;
2190 case ISCSI_NET_PARAM_IPV6_MLD_EN:
2191 if (iface_param->iface_num & 0x1)
2192 break;
2193 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2194 init_fw_cb->ipv6_addtl_opts |=
2195 cpu_to_le16(IPV6_ADDOPT_MLD_EN);
2196 else
2197 init_fw_cb->ipv6_addtl_opts &=
2198 cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
2199 break;
2200 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
2201 if (iface_param->iface_num & 0x1)
2202 break;
2203 init_fw_cb->ipv6_flow_lbl =
2204 cpu_to_le16(*(uint16_t *)iface_param->value);
2205 break;
2206 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
2207 if (iface_param->iface_num & 0x1)
2208 break;
2209 init_fw_cb->ipv6_traffic_class = iface_param->value[0];
2210 break;
2211 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
2212 if (iface_param->iface_num & 0x1)
2213 break;
2214 init_fw_cb->ipv6_hop_limit = iface_param->value[0];
2215 break;
2216 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
2217 if (iface_param->iface_num & 0x1)
2218 break;
2219 init_fw_cb->ipv6_nd_reach_time =
2220 cpu_to_le32(*(uint32_t *)iface_param->value);
2221 break;
2222 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
2223 if (iface_param->iface_num & 0x1)
2224 break;
2225 init_fw_cb->ipv6_nd_rexmit_timer =
2226 cpu_to_le32(*(uint32_t *)iface_param->value);
2227 break;
2228 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
2229 if (iface_param->iface_num & 0x1)
2230 break;
2231 init_fw_cb->ipv6_nd_stale_timeout =
2232 cpu_to_le32(*(uint32_t *)iface_param->value);
2233 break;
2234 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
2235 if (iface_param->iface_num & 0x1)
2236 break;
2237 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
2238 break;
2239 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
2240 if (iface_param->iface_num & 0x1)
2241 break;
2242 init_fw_cb->ipv6_gw_advrt_mtu =
2243 cpu_to_le32(*(uint32_t *)iface_param->value);
2244 break;
2245 default:
2246 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
2247 iface_param->param);
2248 break;
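/*
 * qla4xxx_set_ipv4 - same idea as qla4xxx_set_ipv6() above, but for
 * the single IPv4 interface: each ISCSI_NET_PARAM attribute is folded
 * into the corresponding field or option bit of init_fw_cb.
 */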
2252 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
2253 struct iscsi_iface_param_info *iface_param,
2254 struct addr_ctrl_blk *init_fw_cb)
2256 switch (iface_param->param) {
2257 case ISCSI_NET_PARAM_IPV4_ADDR:
2258 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
2259 sizeof(init_fw_cb->ipv4_addr));
2260 break;
2261 case ISCSI_NET_PARAM_IPV4_SUBNET:
2262 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
2263 sizeof(init_fw_cb->ipv4_subnet));
2264 break;
2265 case ISCSI_NET_PARAM_IPV4_GW:
2266 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
2267 sizeof(init_fw_cb->ipv4_gw_addr));
2268 break;
2269 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
2270 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
2271 init_fw_cb->ipv4_tcp_opts |=
2272 cpu_to_le16(TCPOPT_DHCP_ENABLE);
2273 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
2274 init_fw_cb->ipv4_tcp_opts &=
2275 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
2276 else
2277 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
2278 break;
2279 case ISCSI_NET_PARAM_IFACE_ENABLE:
2280 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2281 init_fw_cb->ipv4_ip_opts |=
2282 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
2283 qla4xxx_create_ipv4_iface(ha);
2284 } else {
2285 init_fw_cb->ipv4_ip_opts &=
2286 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
2287 0xFFFF);
2288 qla4xxx_destroy_ipv4_iface(ha);
2290 break;
2291 case ISCSI_NET_PARAM_VLAN_TAG:
2292 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
2293 break;
2294 init_fw_cb->ipv4_vlan_tag =
2295 cpu_to_be16(*(uint16_t *)iface_param->value);
2296 break;
2297 case ISCSI_NET_PARAM_VLAN_ENABLED:
2298 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2299 init_fw_cb->ipv4_ip_opts |=
2300 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
2301 else
2302 init_fw_cb->ipv4_ip_opts &=
2303 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
2304 break;
2305 case ISCSI_NET_PARAM_MTU:
2306 init_fw_cb->eth_mtu_size =
2307 cpu_to_le16(*(uint16_t *)iface_param->value);
2308 break;
2309 case ISCSI_NET_PARAM_PORT:
2310 init_fw_cb->ipv4_port =
2311 cpu_to_le16(*(uint16_t *)iface_param->value);
2312 break;
2313 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2314 if (iface_param->iface_num & 0x1)
2315 break;
2316 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2317 init_fw_cb->ipv4_tcp_opts |=
2318 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
2319 else
2320 init_fw_cb->ipv4_tcp_opts &=
2321 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
2322 0xFFFF);
2323 break;
2324 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2325 if (iface_param->iface_num & 0x1)
2326 break;
2327 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2328 init_fw_cb->ipv4_tcp_opts |=
2329 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
2330 else
2331 init_fw_cb->ipv4_tcp_opts &=
2332 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
2333 break;
2334 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2335 if (iface_param->iface_num & 0x1)
2336 break;
2337 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2338 init_fw_cb->ipv4_tcp_opts |=
2339 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
2340 else
2341 init_fw_cb->ipv4_tcp_opts &=
2342 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
2343 break;
2344 case ISCSI_NET_PARAM_TCP_WSF:
2345 if (iface_param->iface_num & 0x1)
2346 break;
2347 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
2348 break;
2349 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2350 if (iface_param->iface_num & 0x1)
2351 break;
2352 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
2353 init_fw_cb->ipv4_tcp_opts |=
2354 cpu_to_le16((iface_param->value[0] << 1) &
2355 TCPOPT_TIMER_SCALE);
2356 break;
2357 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2358 if (iface_param->iface_num & 0x1)
2359 break;
2360 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2361 init_fw_cb->ipv4_tcp_opts |=
2362 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
2363 else
2364 init_fw_cb->ipv4_tcp_opts &=
2365 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
2366 break;
2367 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
2368 if (iface_param->iface_num & 0x1)
2369 break;
2370 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2371 init_fw_cb->ipv4_tcp_opts |=
2372 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
2373 else
2374 init_fw_cb->ipv4_tcp_opts &=
2375 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
2376 break;
2377 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
2378 if (iface_param->iface_num & 0x1)
2379 break;
2380 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2381 init_fw_cb->ipv4_tcp_opts |=
2382 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
2383 else
2384 init_fw_cb->ipv4_tcp_opts &=
2385 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
2386 break;
2387 case ISCSI_NET_PARAM_IPV4_TOS_EN:
2388 if (iface_param->iface_num & 0x1)
2389 break;
2390 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2391 init_fw_cb->ipv4_ip_opts |=
2392 cpu_to_le16(IPOPT_IPV4_TOS_EN);
2393 else
2394 init_fw_cb->ipv4_ip_opts &=
2395 cpu_to_le16(~IPOPT_IPV4_TOS_EN);
2396 break;
2397 case ISCSI_NET_PARAM_IPV4_TOS:
2398 if (iface_param->iface_num & 0x1)
2399 break;
2400 init_fw_cb->ipv4_tos = iface_param->value[0];
2401 break;
2402 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
2403 if (iface_param->iface_num & 0x1)
2404 break;
2405 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2406 init_fw_cb->ipv4_ip_opts |=
2407 cpu_to_le16(IPOPT_GRAT_ARP_EN);
2408 else
2409 init_fw_cb->ipv4_ip_opts &=
2410 cpu_to_le16(~IPOPT_GRAT_ARP_EN);
2411 break;
2412 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
2413 if (iface_param->iface_num & 0x1)
2414 break;
2415 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2416 init_fw_cb->ipv4_ip_opts |=
2417 cpu_to_le16(IPOPT_ALT_CID_EN);
2418 else
2419 init_fw_cb->ipv4_ip_opts &=
2420 cpu_to_le16(~IPOPT_ALT_CID_EN);
2421 break;
2422 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
2423 if (iface_param->iface_num & 0x1)
2424 break;
2425 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
2426 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
2427 init_fw_cb->ipv4_dhcp_alt_cid_len =
2428 strlen(init_fw_cb->ipv4_dhcp_alt_cid);
2429 break;
2430 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
2431 if (iface_param->iface_num & 0x1)
2432 break;
2433 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2434 init_fw_cb->ipv4_ip_opts |=
2435 cpu_to_le16(IPOPT_REQ_VID_EN);
2436 else
2437 init_fw_cb->ipv4_ip_opts &=
2438 cpu_to_le16(~IPOPT_REQ_VID_EN);
2439 break;
2440 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
2441 if (iface_param->iface_num & 0x1)
2442 break;
2443 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2444 init_fw_cb->ipv4_ip_opts |=
2445 cpu_to_le16(IPOPT_USE_VID_EN);
2446 else
2447 init_fw_cb->ipv4_ip_opts &=
2448 cpu_to_le16(~IPOPT_USE_VID_EN);
2449 break;
2450 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
2451 if (iface_param->iface_num & 0x1)
2452 break;
2453 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
2454 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
2455 init_fw_cb->ipv4_dhcp_vid_len =
2456 strlen(init_fw_cb->ipv4_dhcp_vid);
2457 break;
2458 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
2459 if (iface_param->iface_num & 0x1)
2460 break;
2461 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2462 init_fw_cb->ipv4_ip_opts |=
2463 cpu_to_le16(IPOPT_LEARN_IQN_EN);
2464 else
2465 init_fw_cb->ipv4_ip_opts &=
2466 cpu_to_le16(~IPOPT_LEARN_IQN_EN);
2467 break;
2468 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
2469 if (iface_param->iface_num & 0x1)
2470 break;
2471 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2472 init_fw_cb->ipv4_ip_opts |=
2473 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
2474 else
2475 init_fw_cb->ipv4_ip_opts &=
2476 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
2477 break;
2478 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
2479 if (iface_param->iface_num & 0x1)
2480 break;
2481 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2482 init_fw_cb->ipv4_ip_opts |=
2483 cpu_to_le16(IPOPT_IN_FORWARD_EN);
2484 else
2485 init_fw_cb->ipv4_ip_opts &=
2486 cpu_to_le16(~IPOPT_IN_FORWARD_EN);
2487 break;
2488 case ISCSI_NET_PARAM_REDIRECT_EN:
2489 if (iface_param->iface_num & 0x1)
2490 break;
2491 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2492 init_fw_cb->ipv4_ip_opts |=
2493 cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
2494 else
2495 init_fw_cb->ipv4_ip_opts &=
2496 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
2497 break;
2498 case ISCSI_NET_PARAM_IPV4_TTL:
2499 if (iface_param->iface_num & 0x1)
2500 break;
2501 init_fw_cb->ipv4_ttl = iface_param->value[0];
2502 break;
2503 default:
2504 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
2505 iface_param->param);
2506 break;
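/*
 * qla4xxx_set_iscsi_param - fold the iSCSI-level ISCSI_IFACE_PARAM
 * attributes (digests, R2T, burst lengths, CHAP, ERL, ...) into the
 * iscsi_opts word and related length fields of init_fw_cb.  Byte
 * lengths coming from userspace are scaled down by BYTE_UNITS before
 * being handed to the firmware.
 */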
2510 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
2511 struct iscsi_iface_param_info *iface_param,
2512 struct addr_ctrl_blk *init_fw_cb)
2514 switch (iface_param->param) {
2515 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
2516 if (iface_param->iface_num & 0x1)
2517 break;
2518 init_fw_cb->def_timeout =
2519 cpu_to_le16(*(uint16_t *)iface_param->value);
2520 break;
2521 case ISCSI_IFACE_PARAM_HDRDGST_EN:
2522 if (iface_param->iface_num & 0x1)
2523 break;
2524 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2525 init_fw_cb->iscsi_opts |=
2526 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
2527 else
2528 init_fw_cb->iscsi_opts &=
2529 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
2530 break;
2531 case ISCSI_IFACE_PARAM_DATADGST_EN:
2532 if (iface_param->iface_num & 0x1)
2533 break;
2534 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2535 init_fw_cb->iscsi_opts |=
2536 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
2537 else
2538 init_fw_cb->iscsi_opts &=
2539 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
2540 break;
2541 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
2542 if (iface_param->iface_num & 0x1)
2543 break;
2544 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2545 init_fw_cb->iscsi_opts |=
2546 cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
2547 else
2548 init_fw_cb->iscsi_opts &=
2549 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
2550 break;
2551 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
2552 if (iface_param->iface_num & 0x1)
2553 break;
2554 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2555 init_fw_cb->iscsi_opts |=
2556 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
2557 else
2558 init_fw_cb->iscsi_opts &=
2559 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
2560 break;
2561 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
2562 if (iface_param->iface_num & 0x1)
2563 break;
2564 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2565 init_fw_cb->iscsi_opts |=
2566 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
2567 else
2568 init_fw_cb->iscsi_opts &=
2569 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
2570 break;
2571 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
2572 if (iface_param->iface_num & 0x1)
2573 break;
2574 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2575 init_fw_cb->iscsi_opts |=
2576 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
2577 else
2578 init_fw_cb->iscsi_opts &=
2579 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
2580 break;
2581 case ISCSI_IFACE_PARAM_ERL:
2582 if (iface_param->iface_num & 0x1)
2583 break;
2584 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
2585 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
2586 ISCSIOPTS_ERL);
2587 break;
2588 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
2589 if (iface_param->iface_num & 0x1)
2590 break;
2591 init_fw_cb->iscsi_max_pdu_size =
2592 cpu_to_le32(*(uint32_t *)iface_param->value) /
2593 BYTE_UNITS;
2594 break;
2595 case ISCSI_IFACE_PARAM_FIRST_BURST:
2596 if (iface_param->iface_num & 0x1)
2597 break;
2598 init_fw_cb->iscsi_fburst_len =
2599 cpu_to_le32(*(uint32_t *)iface_param->value) /
2600 BYTE_UNITS;
2601 break;
2602 case ISCSI_IFACE_PARAM_MAX_R2T:
2603 if (iface_param->iface_num & 0x1)
2604 break;
2605 init_fw_cb->iscsi_max_outstnd_r2t =
2606 cpu_to_le16(*(uint16_t *)iface_param->value);
2607 break;
2608 case ISCSI_IFACE_PARAM_MAX_BURST:
2609 if (iface_param->iface_num & 0x1)
2610 break;
2611 init_fw_cb->iscsi_max_burst_len =
2612 cpu_to_le32(*(uint32_t *)iface_param->value) /
2613 BYTE_UNITS;
2614 break;
2615 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
2616 if (iface_param->iface_num & 0x1)
2617 break;
2618 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2619 init_fw_cb->iscsi_opts |=
2620 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
2621 else
2622 init_fw_cb->iscsi_opts &=
2623 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
2624 break;
2625 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
2626 if (iface_param->iface_num & 0x1)
2627 break;
2628 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2629 init_fw_cb->iscsi_opts |=
2630 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
2631 else
2632 init_fw_cb->iscsi_opts &=
2633 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
2634 break;
2635 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
2636 if (iface_param->iface_num & 0x1)
2637 break;
2638 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2639 init_fw_cb->iscsi_opts |=
2640 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
2641 else
2642 init_fw_cb->iscsi_opts &=
2643 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
2644 break;
2645 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
2646 if (iface_param->iface_num & 0x1)
2647 break;
2648 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2649 init_fw_cb->iscsi_opts |=
2650 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2651 else
2652 init_fw_cb->iscsi_opts &=
2653 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2654 break;
2655 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
2656 if (iface_param->iface_num & 0x1)
2657 break;
2658 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2659 init_fw_cb->iscsi_opts |=
2660 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2661 else
2662 init_fw_cb->iscsi_opts &=
2663 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2664 break;
2665 default:
2666 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
2667 iface_param->param);
2668 break;
2672 static void
2673 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
2675 struct addr_ctrl_blk_def *acb;
2676 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
2677 memset(acb->reserved1, 0, sizeof(acb->reserved1));
2678 memset(acb->reserved2, 0, sizeof(acb->reserved2));
2679 memset(acb->reserved3, 0, sizeof(acb->reserved3));
2680 memset(acb->reserved4, 0, sizeof(acb->reserved4));
2681 memset(acb->reserved5, 0, sizeof(acb->reserved5));
2682 memset(acb->reserved6, 0, sizeof(acb->reserved6));
2683 memset(acb->reserved7, 0, sizeof(acb->reserved7));
2684 memset(acb->reserved8, 0, sizeof(acb->reserved8));
2685 memset(acb->reserved9, 0, sizeof(acb->reserved9));
2686 memset(acb->reserved10, 0, sizeof(acb->reserved10));
2687 memset(acb->reserved11, 0, sizeof(acb->reserved11));
2688 memset(acb->reserved12, 0, sizeof(acb->reserved12));
2689 memset(acb->reserved13, 0, sizeof(acb->reserved13));
2690 memset(acb->reserved14, 0, sizeof(acb->reserved14));
2691 memset(acb->reserved15, 0, sizeof(acb->reserved15));
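/*
 * qla4xxx_iface_set_param - top-level handler for iface netlink writes.
 * The flow below is: read the current IFCB from the firmware, patch it
 * with every attribute found in the netlink payload, write the result
 * to flash (read-modify-write commit), disable the current ACB and
 * wait for the completion, then load the new ACB and refresh the
 * driver's local copy of the IP configuration.
 */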
2694 static int
2695 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2697 struct scsi_qla_host *ha = to_qla_host(shost);
2698 int rval = 0;
2699 struct iscsi_iface_param_info *iface_param = NULL;
2700 struct addr_ctrl_blk *init_fw_cb = NULL;
2701 dma_addr_t init_fw_cb_dma;
2702 uint32_t mbox_cmd[MBOX_REG_COUNT];
2703 uint32_t mbox_sts[MBOX_REG_COUNT];
2704 uint32_t rem = len;
2705 struct nlattr *attr;
2707 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2708 sizeof(struct addr_ctrl_blk),
2709 &init_fw_cb_dma, GFP_KERNEL);
2710 if (!init_fw_cb) {
2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2712 __func__);
2713 return -ENOMEM;
2716 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2717 memset(&mbox_sts, 0, sizeof(mbox_sts));
2719 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
2720 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
2721 rval = -EIO;
2722 goto exit_init_fw_cb;
2725 nla_for_each_attr(attr, data, len, rem) {
2726 iface_param = nla_data(attr);
2728 if (iface_param->param_type == ISCSI_NET_PARAM) {
2729 switch (iface_param->iface_type) {
2730 case ISCSI_IFACE_TYPE_IPV4:
2731 switch (iface_param->iface_num) {
2732 case 0:
2733 qla4xxx_set_ipv4(ha, iface_param,
2734 init_fw_cb);
2735 break;
2736 default:
2737 /* Cannot have more than one IPv4 interface */
2738 ql4_printk(KERN_ERR, ha,
2739 "Invalid IPv4 iface number = %d\n",
2740 iface_param->iface_num);
2741 break;
2743 break;
2744 case ISCSI_IFACE_TYPE_IPV6:
2745 switch (iface_param->iface_num) {
2746 case 0:
2747 case 1:
2748 qla4xxx_set_ipv6(ha, iface_param,
2749 init_fw_cb);
2750 break;
2751 default:
2752 /* Cannot have more than two IPv6 interfaces */
2753 ql4_printk(KERN_ERR, ha,
2754 "Invalid IPv6 iface number = %d\n",
2755 iface_param->iface_num);
2756 break;
2758 break;
2759 default:
2760 ql4_printk(KERN_ERR, ha,
2761 "Invalid iface type\n");
2762 break;
2764 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
2765 qla4xxx_set_iscsi_param(ha, iface_param,
2766 init_fw_cb);
2767 } else {
2768 continue;
2772 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
2774 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
2775 sizeof(struct addr_ctrl_blk),
2776 FLASH_OPT_RMW_COMMIT);
2777 if (rval != QLA_SUCCESS) {
2778 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
2779 __func__);
2780 rval = -EIO;
2781 goto exit_init_fw_cb;
2784 rval = qla4xxx_disable_acb(ha);
2785 if (rval != QLA_SUCCESS) {
2786 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
2787 __func__);
2788 rval = -EIO;
2789 goto exit_init_fw_cb;
2792 wait_for_completion_timeout(&ha->disable_acb_comp,
2793 DISABLE_ACB_TOV * HZ);
2795 qla4xxx_initcb_to_acb(init_fw_cb);
2797 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
2798 if (rval != QLA_SUCCESS) {
2799 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
2800 __func__);
2801 rval = -EIO;
2802 goto exit_init_fw_cb;
2805 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2806 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
2807 init_fw_cb_dma);
2809 exit_init_fw_cb:
2810 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
2811 init_fw_cb, init_fw_cb_dma);
2813 return rval;
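/*
 * qla4xxx_session_get_param - CHAP-related parameters are resolved
 * against the adapter's CHAP table (and, for flash DDBs, the saved
 * chap_tbl_idx); USERNAME/PASSWORD are populated from the CHAP table
 * on demand before falling through to the generic
 * iscsi_session_get_param().
 */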
2816 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
2817 enum iscsi_param param, char *buf)
2819 struct iscsi_session *sess = cls_sess->dd_data;
2820 struct ddb_entry *ddb_entry = sess->dd_data;
2821 struct scsi_qla_host *ha = ddb_entry->ha;
2822 struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
2823 struct ql4_chap_table chap_tbl;
2824 int rval, len;
2825 uint16_t idx;
2827 memset(&chap_tbl, 0, sizeof(chap_tbl));
2828 switch (param) {
2829 case ISCSI_PARAM_CHAP_IN_IDX:
2830 rval = qla4xxx_get_chap_index(ha, sess->username_in,
2831 sess->password_in, BIDI_CHAP,
2832 &idx);
2833 if (rval)
2834 len = sprintf(buf, "\n");
2835 else
2836 len = sprintf(buf, "%hu\n", idx);
2837 break;
2838 case ISCSI_PARAM_CHAP_OUT_IDX:
2839 if (ddb_entry->ddb_type == FLASH_DDB) {
2840 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2841 idx = ddb_entry->chap_tbl_idx;
2842 rval = QLA_SUCCESS;
2843 } else {
2844 rval = QLA_ERROR;
2846 } else {
2847 rval = qla4xxx_get_chap_index(ha, sess->username,
2848 sess->password,
2849 LOCAL_CHAP, &idx);
2851 if (rval)
2852 len = sprintf(buf, "\n");
2853 else
2854 len = sprintf(buf, "%hu\n", idx);
2855 break;
2856 case ISCSI_PARAM_USERNAME:
2857 case ISCSI_PARAM_PASSWORD:
2858 /* First, populate session username and password for FLASH DDB,
2859 * if not already done. This happens when session login fails
2860 * for a FLASH DDB.
2862 if (ddb_entry->ddb_type == FLASH_DDB &&
2863 ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
2864 !sess->username && !sess->password) {
2865 idx = ddb_entry->chap_tbl_idx;
2866 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2867 chap_tbl.secret,
2868 idx);
2869 if (!rval) {
2870 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2871 (char *)chap_tbl.name,
2872 strlen((char *)chap_tbl.name));
2873 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2874 (char *)chap_tbl.secret,
2875 chap_tbl.secret_len);
2878 /* fall through */
2879 default:
2880 return iscsi_session_get_param(cls_sess, param, buf);
2883 return len;
2886 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
2887 enum iscsi_param param, char *buf)
2889 struct iscsi_conn *conn;
2890 struct qla_conn *qla_conn;
2891 struct sockaddr *dst_addr;
2893 conn = cls_conn->dd_data;
2894 qla_conn = conn->dd_data;
2895 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
2897 switch (param) {
2898 case ISCSI_PARAM_CONN_PORT:
2899 case ISCSI_PARAM_CONN_ADDRESS:
2900 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2901 dst_addr, param, buf);
2902 default:
2903 return iscsi_conn_get_param(cls_conn, param, buf);
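/*
 * qla4xxx_get_ddb_index - claim a free DDB slot.  A candidate index is
 * reserved atomically in ddb_idx_map with test_and_set_bit() and then
 * requested from the firmware; if the firmware rejects it with
 * MBOX_STS_COMMAND_ERROR, the next free index is tried.
 */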
2907 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
2909 uint32_t mbx_sts = 0;
2910 uint16_t tmp_ddb_index;
2911 int ret;
2913 get_ddb_index:
2914 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
2916 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
2917 DEBUG2(ql4_printk(KERN_INFO, ha,
2918 "Free DDB index not available\n"));
2919 ret = QLA_ERROR;
2920 goto exit_get_ddb_index;
2923 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
2924 goto get_ddb_index;
2926 DEBUG2(ql4_printk(KERN_INFO, ha,
2927 "Found a free DDB index at %d\n", tmp_ddb_index));
2928 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
2929 if (ret == QLA_ERROR) {
2930 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
2931 ql4_printk(KERN_INFO, ha,
2932 "DDB index = %d not available trying next\n",
2933 tmp_ddb_index);
2934 goto get_ddb_index;
2936 DEBUG2(ql4_printk(KERN_INFO, ha,
2937 "Free FW DDB not available\n"));
2940 *ddb_index = tmp_ddb_index;
2942 exit_get_ddb_index:
2943 return ret;
2946 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
2947 struct ddb_entry *ddb_entry,
2948 char *existing_ipaddr,
2949 char *user_ipaddr)
2951 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
2952 char formatted_ipaddr[DDB_IPADDR_LEN];
2953 int status = QLA_SUCCESS, ret = 0;
2955 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
2956 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2957 '\0', NULL);
2958 if (ret == 0) {
2959 status = QLA_ERROR;
2960 goto out_match;
2962 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
2963 } else {
2964 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2965 '\0', NULL);
2966 if (ret == 0) {
2967 status = QLA_ERROR;
2968 goto out_match;
2970 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
2973 if (strcmp(existing_ipaddr, formatted_ipaddr))
2974 status = QLA_ERROR;
2976 out_match:
2977 return status;
2980 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
2981 struct iscsi_cls_conn *cls_conn)
2983 int idx = 0, max_ddbs, rval;
2984 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2985 struct iscsi_session *sess, *existing_sess;
2986 struct iscsi_conn *conn, *existing_conn;
2987 struct ddb_entry *ddb_entry;
2989 sess = cls_sess->dd_data;
2990 conn = cls_conn->dd_data;
2992 if (sess->targetname == NULL ||
2993 conn->persistent_address == NULL ||
2994 conn->persistent_port == 0)
2995 return QLA_ERROR;
2997 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
2998 MAX_DEV_DB_ENTRIES;
3000 for (idx = 0; idx < max_ddbs; idx++) {
3001 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
3002 if (ddb_entry == NULL)
3003 continue;
3005 if (ddb_entry->ddb_type != FLASH_DDB)
3006 continue;
3008 existing_sess = ddb_entry->sess->dd_data;
3009 existing_conn = ddb_entry->conn->dd_data;
3011 if (existing_sess->targetname == NULL ||
3012 existing_conn->persistent_address == NULL ||
3013 existing_conn->persistent_port == 0)
3014 continue;
3016 DEBUG2(ql4_printk(KERN_INFO, ha,
3017 "IQN = %s User IQN = %s\n",
3018 existing_sess->targetname,
3019 sess->targetname));
3021 DEBUG2(ql4_printk(KERN_INFO, ha,
3022 "IP = %s User IP = %s\n",
3023 existing_conn->persistent_address,
3024 conn->persistent_address));
3026 DEBUG2(ql4_printk(KERN_INFO, ha,
3027 "Port = %d User Port = %d\n",
3028 existing_conn->persistent_port,
3029 conn->persistent_port));
3031 if (strcmp(existing_sess->targetname, sess->targetname))
3032 continue;
3033 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
3034 existing_conn->persistent_address,
3035 conn->persistent_address);
3036 if (rval == QLA_ERROR)
3037 continue;
3038 if (existing_conn->persistent_port != conn->persistent_port)
3039 continue;
3040 break;
3043 if (idx == max_ddbs)
3044 return QLA_ERROR;
3046 DEBUG2(ql4_printk(KERN_INFO, ha,
3047 "Match found in fwdb sessions\n"));
3048 return QLA_SUCCESS;
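/*
 * qla4xxx_session_create - allocate a firmware DDB index for the new
 * session, then let libiscsi set up the class session with a ddb_entry
 * as its dd_data and register the entry in fw_ddb_index_map.
 */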
3051 static struct iscsi_cls_session *
3052 qla4xxx_session_create(struct iscsi_endpoint *ep,
3053 uint16_t cmds_max, uint16_t qdepth,
3054 uint32_t initial_cmdsn)
3056 struct iscsi_cls_session *cls_sess;
3057 struct scsi_qla_host *ha;
3058 struct qla_endpoint *qla_ep;
3059 struct ddb_entry *ddb_entry;
3060 uint16_t ddb_index;
3061 struct iscsi_session *sess;
3062 struct sockaddr *dst_addr;
3063 int ret;
3065 if (!ep) {
3066 printk(KERN_ERR "qla4xxx: missing ep.\n");
3067 return NULL;
3070 qla_ep = ep->dd_data;
3071 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
3072 ha = to_qla_host(qla_ep->host);
3073 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3074 ha->host_no));
3076 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
3077 if (ret == QLA_ERROR)
3078 return NULL;
3080 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
3081 cmds_max, sizeof(struct ddb_entry),
3082 sizeof(struct ql4_task_data),
3083 initial_cmdsn, ddb_index);
3084 if (!cls_sess)
3085 return NULL;
3087 sess = cls_sess->dd_data;
3088 ddb_entry = sess->dd_data;
3089 ddb_entry->fw_ddb_index = ddb_index;
3090 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
3091 ddb_entry->ha = ha;
3092 ddb_entry->sess = cls_sess;
3093 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
3094 ddb_entry->ddb_change = qla4xxx_ddb_change;
3095 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
3096 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
3097 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
3098 ha->tot_ddbs++;
3100 return cls_sess;
3103 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
3105 struct iscsi_session *sess;
3106 struct ddb_entry *ddb_entry;
3107 struct scsi_qla_host *ha;
3108 unsigned long flags, wtime;
3109 struct dev_db_entry *fw_ddb_entry = NULL;
3110 dma_addr_t fw_ddb_entry_dma;
3111 uint32_t ddb_state;
3112 int ret;
3114 sess = cls_sess->dd_data;
3115 ddb_entry = sess->dd_data;
3116 ha = ddb_entry->ha;
3117 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3118 ha->host_no));
3120 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3121 &fw_ddb_entry_dma, GFP_KERNEL);
3122 if (!fw_ddb_entry) {
3123 ql4_printk(KERN_ERR, ha,
3124 "%s: Unable to allocate dma buffer\n", __func__);
3125 goto destroy_session;
3128 wtime = jiffies + (HZ * LOGOUT_TOV);
3129 do {
3130 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
3131 fw_ddb_entry, fw_ddb_entry_dma,
3132 NULL, NULL, &ddb_state, NULL,
3133 NULL, NULL);
3134 if (ret == QLA_ERROR)
3135 goto destroy_session;
3137 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
3138 (ddb_state == DDB_DS_SESSION_FAILED))
3139 goto destroy_session;
3141 schedule_timeout_uninterruptible(HZ);
3142 } while ((time_after(wtime, jiffies)));
3144 destroy_session:
3145 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
3146 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
3147 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
3148 spin_lock_irqsave(&ha->hardware_lock, flags);
3149 qla4xxx_free_ddb(ha, ddb_entry);
3150 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3152 iscsi_session_teardown(cls_sess);
3154 if (fw_ddb_entry)
3155 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3156 fw_ddb_entry, fw_ddb_entry_dma);
3159 static struct iscsi_cls_conn *
3160 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
3162 struct iscsi_cls_conn *cls_conn;
3163 struct iscsi_session *sess;
3164 struct ddb_entry *ddb_entry;
3165 struct scsi_qla_host *ha;
3167 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
3168 conn_idx);
3169 if (!cls_conn) {
3170 pr_info("%s: Can not create connection for conn_idx = %u\n",
3171 __func__, conn_idx);
3172 return NULL;
3175 sess = cls_sess->dd_data;
3176 ddb_entry = sess->dd_data;
3177 ddb_entry->conn = cls_conn;
3179 ha = ddb_entry->ha;
3180 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
3181 conn_idx));
3182 return cls_conn;
3185 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3186 struct iscsi_cls_conn *cls_conn,
3187 uint64_t transport_fd, int is_leading)
3189 struct iscsi_conn *conn;
3190 struct qla_conn *qla_conn;
3191 struct iscsi_endpoint *ep;
3192 struct ddb_entry *ddb_entry;
3193 struct scsi_qla_host *ha;
3194 struct iscsi_session *sess;
3196 sess = cls_session->dd_data;
3197 ddb_entry = sess->dd_data;
3198 ha = ddb_entry->ha;
3200 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3201 cls_session->sid, cls_conn->cid));
3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3204 return -EINVAL;
3205 ep = iscsi_lookup_endpoint(transport_fd);
3206 if (!ep)
3207 return -EINVAL;
3208 conn = cls_conn->dd_data;
3209 qla_conn = conn->dd_data;
3210 qla_conn->qla_ep = ep->dd_data;
3211 return 0;
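/*
 * qla4xxx_conn_start - refuse to start if an identical session already
 * exists in the firmware's flash DDBs; otherwise program the DDB with
 * the negotiated parameters (qla4xxx_set_param_ddbentry) and ask the
 * firmware to open the connection, i.e. perform the actual login.
 */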
3214 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
3216 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3217 struct iscsi_session *sess;
3218 struct ddb_entry *ddb_entry;
3219 struct scsi_qla_host *ha;
3220 struct dev_db_entry *fw_ddb_entry = NULL;
3221 dma_addr_t fw_ddb_entry_dma;
3222 uint32_t mbx_sts = 0;
3223 int ret = 0;
3224 int status = QLA_SUCCESS;
3226 sess = cls_sess->dd_data;
3227 ddb_entry = sess->dd_data;
3228 ha = ddb_entry->ha;
3229 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3230 cls_sess->sid, cls_conn->cid));
3232 /* Check if we have a matching FW DDB; if so, do not log in to this
3233  * target again, since doing so could cause the target to log out the
3234  * previous connection.
3236 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
3237 if (ret == QLA_SUCCESS) {
3238 ql4_printk(KERN_INFO, ha,
3239 "Session already exist in FW.\n");
3240 ret = -EEXIST;
3241 goto exit_conn_start;
3244 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3245 &fw_ddb_entry_dma, GFP_KERNEL);
3246 if (!fw_ddb_entry) {
3247 ql4_printk(KERN_ERR, ha,
3248 "%s: Unable to allocate dma buffer\n", __func__);
3249 ret = -ENOMEM;
3250 goto exit_conn_start;
3253 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
3254 if (ret) {
3255 /* If iscsid is stopped and restarted, there is no need to set the
3256  * params again, since the DDB state will already be active and
3257  * the FW does not allow set ddb on an
3258  * active session.
3260 if (mbx_sts)
3261 if (ddb_entry->fw_ddb_device_state ==
3262 DDB_DS_SESSION_ACTIVE) {
3263 ddb_entry->unblock_sess(ddb_entry->sess);
3264 goto exit_set_param;
3267 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
3268 __func__, ddb_entry->fw_ddb_index);
3269 goto exit_conn_start;
3272 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
3273 if (status == QLA_ERROR) {
3274 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
3275 sess->targetname);
3276 ret = -EINVAL;
3277 goto exit_conn_start;
3280 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
3281 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
3283 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
3284 ddb_entry->fw_ddb_device_state));
3286 exit_set_param:
3287 ret = 0;
3289 exit_conn_start:
3290 if (fw_ddb_entry)
3291 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3292 fw_ddb_entry, fw_ddb_entry_dma);
3293 return ret;
3296 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
3298 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3299 struct iscsi_session *sess;
3300 struct scsi_qla_host *ha;
3301 struct ddb_entry *ddb_entry;
3302 int options;
3304 sess = cls_sess->dd_data;
3305 ddb_entry = sess->dd_data;
3306 ha = ddb_entry->ha;
3307 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
3308 cls_conn->cid));
3310 options = LOGOUT_OPTION_CLOSE_SESSION;
3311 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
3312 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
3315 static void qla4xxx_task_work(struct work_struct *wdata)
3317 struct ql4_task_data *task_data;
3318 struct scsi_qla_host *ha;
3319 struct passthru_status *sts;
3320 struct iscsi_task *task;
3321 struct iscsi_hdr *hdr;
3322 uint8_t *data;
3323 uint32_t data_len;
3324 struct iscsi_conn *conn;
3325 int hdr_len;
3326 itt_t itt;
3328 task_data = container_of(wdata, struct ql4_task_data, task_work);
3329 ha = task_data->ha;
3330 task = task_data->task;
3331 sts = &task_data->sts;
3332 hdr_len = sizeof(struct iscsi_hdr);
3334 DEBUG3(printk(KERN_INFO "Status returned\n"));
3335 DEBUG3(qla4xxx_dump_buffer(sts, 64));
3336 DEBUG3(printk(KERN_INFO "Response buffer"));
3337 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
3339 conn = task->conn;
3341 switch (sts->completionStatus) {
3342 case PASSTHRU_STATUS_COMPLETE:
3343 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
3344 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
3345 itt = sts->handle;
3346 hdr->itt = itt;
3347 data = task_data->resp_buffer + hdr_len;
3348 data_len = task_data->resp_len - hdr_len;
3349 iscsi_complete_pdu(conn, hdr, data, data_len);
3350 break;
3351 default:
3352 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
3353 sts->completionStatus);
3354 break;
3356 return;
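/*
 * qla4xxx_alloc_pdu - per-task setup for passthrough (non-SCSI) PDUs:
 * the immediate data is DMA-mapped and coherent request/response
 * buffers are allocated; task->hdr points into the request buffer, so
 * libiscsi builds the PDU header directly in DMA-able memory.
 */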
3359 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3361 struct ql4_task_data *task_data;
3362 struct iscsi_session *sess;
3363 struct ddb_entry *ddb_entry;
3364 struct scsi_qla_host *ha;
3365 int hdr_len;
3367 sess = task->conn->session;
3368 ddb_entry = sess->dd_data;
3369 ha = ddb_entry->ha;
3370 task_data = task->dd_data;
3371 memset(task_data, 0, sizeof(struct ql4_task_data));
3373 if (task->sc) {
3374 ql4_printk(KERN_INFO, ha,
3375 "%s: SCSI Commands not implemented\n", __func__);
3376 return -EINVAL;
3379 hdr_len = sizeof(struct iscsi_hdr);
3380 task_data->ha = ha;
3381 task_data->task = task;
3383 if (task->data_count) {
3384 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
3385 task->data_count,
3386 DMA_TO_DEVICE);
3389 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3390 __func__, task->conn->max_recv_dlength, hdr_len));
3392 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
3393 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
3394 task_data->resp_len,
3395 &task_data->resp_dma,
3396 GFP_ATOMIC);
3397 if (!task_data->resp_buffer)
3398 goto exit_alloc_pdu;
3400 task_data->req_len = task->data_count + hdr_len;
3401 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
3402 task_data->req_len,
3403 &task_data->req_dma,
3404 GFP_ATOMIC);
3405 if (!task_data->req_buffer)
3406 goto exit_alloc_pdu;
3408 task->hdr = task_data->req_buffer;
3410 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
3412 return 0;
3414 exit_alloc_pdu:
3415 if (task_data->resp_buffer)
3416 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3417 task_data->resp_buffer, task_data->resp_dma);
3419 if (task_data->req_buffer)
3420 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3421 task_data->req_buffer, task_data->req_dma);
3422 return -ENOMEM;
3425 static void qla4xxx_task_cleanup(struct iscsi_task *task)
3427 struct ql4_task_data *task_data;
3428 struct iscsi_session *sess;
3429 struct ddb_entry *ddb_entry;
3430 struct scsi_qla_host *ha;
3431 int hdr_len;
3433 hdr_len = sizeof(struct iscsi_hdr);
3434 sess = task->conn->session;
3435 ddb_entry = sess->dd_data;
3436 ha = ddb_entry->ha;
3437 task_data = task->dd_data;
3439 if (task->data_count) {
3440 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
3441 task->data_count, DMA_TO_DEVICE);
3444 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3445 __func__, task->conn->max_recv_dlength, hdr_len));
3447 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3448 task_data->resp_buffer, task_data->resp_dma);
3449 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3450 task_data->req_buffer, task_data->req_dma);
3451 return;
3454 static int qla4xxx_task_xmit(struct iscsi_task *task)
3456 struct scsi_cmnd *sc = task->sc;
3457 struct iscsi_session *sess = task->conn->session;
3458 struct ddb_entry *ddb_entry = sess->dd_data;
3459 struct scsi_qla_host *ha = ddb_entry->ha;
3461 if (!sc)
3462 return qla4xxx_send_passthru0(task);
3464 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
3465 __func__);
3466 return -ENOSYS;
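/*
 * qla4xxx_copy_from_fwddb_param - decode a firmware dev_db_entry into
 * the transport's flash session/connection objects: option words
 * become individual flags via test_bit(), and the firmware's length
 * fields (kept in BYTE_UNITS granularity) are scaled back up to bytes.
 */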
3469 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
3470 struct iscsi_bus_flash_conn *conn,
3471 struct dev_db_entry *fw_ddb_entry)
3473 unsigned long options = 0;
3474 int rc = 0;
3476 options = le16_to_cpu(fw_ddb_entry->options);
3477 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3478 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3479 rc = iscsi_switch_str_param(&sess->portal_type,
3480 PORTAL_TYPE_IPV6);
3481 if (rc)
3482 goto exit_copy;
3483 } else {
3484 rc = iscsi_switch_str_param(&sess->portal_type,
3485 PORTAL_TYPE_IPV4);
3486 if (rc)
3487 goto exit_copy;
3490 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3491 &options);
3492 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3493 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
3495 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3496 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3497 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3498 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3499 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3500 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3501 &options);
3502 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3503 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3504 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
3505 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3506 &options);
3507 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3508 sess->discovery_auth_optional =
3509 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3510 if (test_bit(ISCSIOPT_ERL1, &options))
3511 sess->erl |= BIT_1;
3512 if (test_bit(ISCSIOPT_ERL0, &options))
3513 sess->erl |= BIT_0;
3515 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3516 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3517 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3518 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3519 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3520 conn->tcp_timer_scale |= BIT_3;
3521 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3522 conn->tcp_timer_scale |= BIT_2;
3523 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3524 conn->tcp_timer_scale |= BIT_1;
3526 conn->tcp_timer_scale >>= 1;
3527 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3529 options = le16_to_cpu(fw_ddb_entry->ip_options);
3530 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3532 conn->max_recv_dlength = BYTE_UNITS *
3533 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3534 conn->max_xmit_dlength = BYTE_UNITS *
3535 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3536 sess->first_burst = BYTE_UNITS *
3537 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3538 sess->max_burst = BYTE_UNITS *
3539 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3540 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3541 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3542 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3543 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3544 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3545 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3546 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3547 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
3548 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
3549 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3550 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3551 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3552 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
3553 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
3554 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3555 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3557 sess->default_taskmgmt_timeout =
3558 le16_to_cpu(fw_ddb_entry->def_timeout);
3559 conn->port = le16_to_cpu(fw_ddb_entry->port);
3561 options = le16_to_cpu(fw_ddb_entry->options);
3562 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3563 if (!conn->ipaddress) {
3564 rc = -ENOMEM;
3565 goto exit_copy;
3568 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3569 if (!conn->redirect_ipaddr) {
3570 rc = -ENOMEM;
3571 goto exit_copy;
3574 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
3575 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
3577 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3578 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
3580 conn->link_local_ipv6_addr = kmemdup(
3581 fw_ddb_entry->link_local_ipv6_addr,
3582 IPv6_ADDR_LEN, GFP_KERNEL);
3583 if (!conn->link_local_ipv6_addr) {
3584 rc = -ENOMEM;
3585 goto exit_copy;
3587 } else {
3588 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3591 if (fw_ddb_entry->iscsi_name[0]) {
3592 rc = iscsi_switch_str_param(&sess->targetname,
3593 (char *)fw_ddb_entry->iscsi_name);
3594 if (rc)
3595 goto exit_copy;
3598 if (fw_ddb_entry->iscsi_alias[0]) {
3599 rc = iscsi_switch_str_param(&sess->targetalias,
3600 (char *)fw_ddb_entry->iscsi_alias);
3601 if (rc)
3602 goto exit_copy;
3605 COPY_ISID(sess->isid, fw_ddb_entry->isid);
3607 exit_copy:
3608 return rc;
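/*
 * Inverse of the copy above: pack the flash session/connection parameters
 * back into the little-endian firmware DDB entry.
 */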
3611 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
3612 struct iscsi_bus_flash_conn *conn,
3613 struct dev_db_entry *fw_ddb_entry)
3615 uint16_t options;
3616 int rc = 0;
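/*
 * Each SET_BITVAL() below is assumed to set or clear the named bit in
 * 'options' depending on whether the corresponding parameter is non-zero.
 */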
3618 options = le16_to_cpu(fw_ddb_entry->options);
3619 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
3620 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3621 options |= BIT_8;
3622 else
3623 options &= ~BIT_8;
3625 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
3626 SET_BITVAL(sess->discovery_sess, options, BIT_4);
3627 SET_BITVAL(sess->entry_state, options, BIT_3);
3628 fw_ddb_entry->options = cpu_to_le16(options);
3630 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3631 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
3632 SET_BITVAL(conn->datadgst_en, options, BIT_12);
3633 SET_BITVAL(sess->imm_data_en, options, BIT_11);
3634 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
3635 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
3636 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
3637 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
3638 SET_BITVAL(conn->snack_req_en, options, BIT_6);
3639 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
3640 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
3641 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
3642 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
3643 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
3644 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
3646 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3647 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
3648 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
3649 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
3650 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
3651 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
3652 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
3653 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
3654 fw_ddb_entry->tcp_options = cpu_to_le16(options);
3656 options = le16_to_cpu(fw_ddb_entry->ip_options);
3657 SET_BITVAL(conn->fragment_disable, options, BIT_4);
3658 fw_ddb_entry->ip_options = cpu_to_le16(options);
3660 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
3661 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
3662 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
3663 fw_ddb_entry->iscsi_max_snd_data_seg_len =
3664 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
3665 fw_ddb_entry->iscsi_first_burst_len =
3666 cpu_to_le16(sess->first_burst / BYTE_UNITS);
3667 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
3668 BYTE_UNITS);
3669 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
3670 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
3671 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
3672 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
3673 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
3674 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
3675 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
3676 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
3677 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
3678 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
3679 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
3680 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
3681 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
3682 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
3683 fw_ddb_entry->port = cpu_to_le16(conn->port);
3684 fw_ddb_entry->def_timeout =
3685 cpu_to_le16(sess->default_taskmgmt_timeout);
3687 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3688 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
3689 else
3690 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
3692 if (conn->ipaddress)
3693 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
3694 sizeof(fw_ddb_entry->ip_addr));
3696 if (conn->redirect_ipaddr)
3697 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
3698 sizeof(fw_ddb_entry->tgt_addr));
3700 if (conn->link_local_ipv6_addr)
3701 memcpy(fw_ddb_entry->link_local_ipv6_addr,
3702 conn->link_local_ipv6_addr,
3703 sizeof(fw_ddb_entry->link_local_ipv6_addr));
3705 if (sess->targetname)
3706 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
3707 sizeof(fw_ddb_entry->iscsi_name));
3709 if (sess->targetalias)
3710 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
3711 sizeof(fw_ddb_entry->iscsi_alias));
3713 COPY_ISID(fw_ddb_entry->isid, sess->isid);
3715 return rc;
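/*
 * Copy a firmware DDB entry into the iSCSI session/connection structures
 * used by the iSCSI transport at runtime.
 */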
3718 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
3719 struct iscsi_session *sess,
3720 struct dev_db_entry *fw_ddb_entry)
3722 unsigned long options = 0;
3723 uint16_t ddb_link;
3724 uint16_t disc_parent;
3725 char ip_addr[DDB_IPADDR_LEN];
3727 options = le16_to_cpu(fw_ddb_entry->options);
3728 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3729 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3730 &options);
3731 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3733 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3734 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3735 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3736 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3737 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3738 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3739 &options);
3740 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3741 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3742 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3743 &options);
3744 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3745 sess->discovery_auth_optional =
3746 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3747 if (test_bit(ISCSIOPT_ERL1, &options))
3748 sess->erl |= BIT_1;
3749 if (test_bit(ISCSIOPT_ERL0, &options))
3750 sess->erl |= BIT_0;
3752 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3753 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3754 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3755 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3756 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3757 conn->tcp_timer_scale |= BIT_3;
3758 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3759 conn->tcp_timer_scale |= BIT_2;
3760 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3761 conn->tcp_timer_scale |= BIT_1;
3763 conn->tcp_timer_scale >>= 1;
3764 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3766 options = le16_to_cpu(fw_ddb_entry->ip_options);
3767 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3769 conn->max_recv_dlength = BYTE_UNITS *
3770 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3771 conn->max_xmit_dlength = BYTE_UNITS *
3772 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3773 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3774 sess->first_burst = BYTE_UNITS *
3775 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3776 sess->max_burst = BYTE_UNITS *
3777 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3778 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3779 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3780 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3781 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3782 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3783 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3784 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3785 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
3786 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3787 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3788 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3789 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3790 COPY_ISID(sess->isid, fw_ddb_entry->isid);
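/*
 * Map the firmware ddb_link value to a discovery parent type: an iSNS
 * entry, no link, a valid DDB index (SendTargets), or unknown.
 */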
3792 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
3793 if (ddb_link == DDB_ISNS)
3794 disc_parent = ISCSI_DISC_PARENT_ISNS;
3795 else if (ddb_link == DDB_NO_LINK)
3796 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3797 else if (ddb_link < MAX_DDB_ENTRIES)
3798 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
3799 else
3800 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3802 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
3803 iscsi_get_discovery_parent_name(disc_parent), 0);
3805 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
3806 (char *)fw_ddb_entry->iscsi_alias, 0);
3808 options = le16_to_cpu(fw_ddb_entry->options);
3809 if (options & DDB_OPT_IPV6_DEVICE) {
3810 memset(ip_addr, 0, sizeof(ip_addr));
3811 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
3812 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
3813 (char *)ip_addr, 0);
3817 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
3818 struct dev_db_entry *fw_ddb_entry,
3819 struct iscsi_cls_session *cls_sess,
3820 struct iscsi_cls_conn *cls_conn)
3822 int buflen = 0;
3823 struct iscsi_session *sess;
3824 struct ddb_entry *ddb_entry;
3825 struct ql4_chap_table chap_tbl;
3826 struct iscsi_conn *conn;
3827 char ip_addr[DDB_IPADDR_LEN];
3828 uint16_t options = 0;
3830 sess = cls_sess->dd_data;
3831 ddb_entry = sess->dd_data;
3832 conn = cls_conn->dd_data;
3833 memset(&chap_tbl, 0, sizeof(chap_tbl));
3835 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3837 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3839 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
3840 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
3842 memset(ip_addr, 0, sizeof(ip_addr));
3843 options = le16_to_cpu(fw_ddb_entry->options);
3844 if (options & DDB_OPT_IPV6_DEVICE) {
3845 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
3847 memset(ip_addr, 0, sizeof(ip_addr));
3848 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
3849 } else {
3850 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
3851 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
3854 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
3855 (char *)ip_addr, buflen);
3856 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
3857 (char *)fw_ddb_entry->iscsi_name, buflen);
3858 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
3859 (char *)ha->name_string, buflen);
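/*
 * If a CHAP table entry is configured for this DDB, fetch the
 * unidirectional CHAP name/secret and expose them as the session's
 * username and password.
 */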
3861 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
3862 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
3863 chap_tbl.secret,
3864 ddb_entry->chap_tbl_idx)) {
3865 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
3866 (char *)chap_tbl.name,
3867 strlen((char *)chap_tbl.name));
3868 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
3869 (char *)chap_tbl.secret,
3870 chap_tbl.secret_len);
3875 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
3876 struct ddb_entry *ddb_entry)
3878 struct iscsi_cls_session *cls_sess;
3879 struct iscsi_cls_conn *cls_conn;
3880 uint32_t ddb_state;
3881 dma_addr_t fw_ddb_entry_dma;
3882 struct dev_db_entry *fw_ddb_entry;
3884 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3885 &fw_ddb_entry_dma, GFP_KERNEL);
3886 if (!fw_ddb_entry) {
3887 ql4_printk(KERN_ERR, ha,
3888 "%s: Unable to allocate dma buffer\n", __func__);
3889 goto exit_session_conn_fwddb_param;
3892 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3893 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3894 NULL, NULL, NULL) == QLA_ERROR) {
3895 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3896 "get_ddb_entry for fw_ddb_index %d\n",
3897 ha->host_no, __func__,
3898 ddb_entry->fw_ddb_index));
3899 goto exit_session_conn_fwddb_param;
3902 cls_sess = ddb_entry->sess;
3904 cls_conn = ddb_entry->conn;
3906 /* Update params */
3907 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
3909 exit_session_conn_fwddb_param:
3910 if (fw_ddb_entry)
3911 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3912 fw_ddb_entry, fw_ddb_entry_dma);
3915 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
3916 struct ddb_entry *ddb_entry)
3918 struct iscsi_cls_session *cls_sess;
3919 struct iscsi_cls_conn *cls_conn;
3920 struct iscsi_session *sess;
3921 struct iscsi_conn *conn;
3922 uint32_t ddb_state;
3923 dma_addr_t fw_ddb_entry_dma;
3924 struct dev_db_entry *fw_ddb_entry;
3926 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3927 &fw_ddb_entry_dma, GFP_KERNEL);
3928 if (!fw_ddb_entry) {
3929 ql4_printk(KERN_ERR, ha,
3930 "%s: Unable to allocate dma buffer\n", __func__);
3931 goto exit_session_conn_param;
3934 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3935 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3936 NULL, NULL, NULL) == QLA_ERROR) {
3937 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3938 "get_ddb_entry for fw_ddb_index %d\n",
3939 ha->host_no, __func__,
3940 ddb_entry->fw_ddb_index));
3941 goto exit_session_conn_param;
3944 cls_sess = ddb_entry->sess;
3945 sess = cls_sess->dd_data;
3947 cls_conn = ddb_entry->conn;
3948 conn = cls_conn->dd_data;
3950 /* Update timers after login */
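/*
 * Use the firmware default timeout as the relogin timeout only when it
 * lies between LOGIN_TOV and LOGIN_TOV * 10; otherwise fall back to
 * LOGIN_TOV.
 */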
3951 ddb_entry->default_relogin_timeout =
3952 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
3953 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
3954 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
3955 ddb_entry->default_time2wait =
3956 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3958 /* Update params */
3959 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3960 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3962 memcpy(sess->initiatorname, ha->name_string,
3963 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
3965 exit_session_conn_param:
3966 if (fw_ddb_entry)
3967 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3968 fw_ddb_entry, fw_ddb_entry_dma);
3972 * Timer routines
3974 static void qla4xxx_timer(struct timer_list *t);
3976 static void qla4xxx_start_timer(struct scsi_qla_host *ha,
3977 unsigned long interval)
3979 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
3980 __func__, ha->host->host_no));
3981 timer_setup(&ha->timer, qla4xxx_timer, 0);
3982 ha->timer.expires = jiffies + interval * HZ;
3983 add_timer(&ha->timer);
3984 ha->timer_active = 1;
3987 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
3989 del_timer_sync(&ha->timer);
3990 ha->timer_active = 0;
3993 /**
3994 * qla4xxx_mark_device_missing - blocks the session
3995 * @cls_session: Pointer to the session to be blocked
3998 * This routine marks the device missing and closes its connection.
4000 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
4002 iscsi_block_session(cls_session);
4006 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
4007 * @ha: Pointer to host adapter structure.
4009 * This routine marks all devices missing by blocking their sessions.
4011 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
4013 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
4016 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
4017 struct ddb_entry *ddb_entry,
4018 struct scsi_cmnd *cmd)
4020 struct srb *srb;
4022 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
4023 if (!srb)
4024 return srb;
4026 kref_init(&srb->srb_ref);
4027 srb->ha = ha;
4028 srb->ddb = ddb_entry;
4029 srb->cmd = cmd;
4030 srb->flags = 0;
4031 CMD_SP(cmd) = (void *)srb;
4033 return srb;
4036 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
4038 struct scsi_cmnd *cmd = srb->cmd;
4040 if (srb->flags & SRB_DMA_VALID) {
4041 scsi_dma_unmap(cmd);
4042 srb->flags &= ~SRB_DMA_VALID;
4044 CMD_SP(cmd) = NULL;
4047 void qla4xxx_srb_compl(struct kref *ref)
4049 struct srb *srb = container_of(ref, struct srb, srb_ref);
4050 struct scsi_cmnd *cmd = srb->cmd;
4051 struct scsi_qla_host *ha = srb->ha;
4053 qla4xxx_srb_free_dma(ha, srb);
4055 mempool_free(srb, ha->srb_mempool);
4057 cmd->scsi_done(cmd);
4061 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
4062 * @host: scsi host
4063 * @cmd: Pointer to Linux's SCSI command structure
4065 * Remarks:
4066 * This routine is invoked by Linux to send a SCSI command to the driver.
4067 * The mid-level driver tries to ensure that queuecommand never gets
4068 * invoked concurrently with itself or the interrupt handler (although
4069 * the interrupt handler may call this routine as part of request-
4070 * completion handling). Unfortunately, it sometimes calls the scheduler
4071 * in interrupt context, which is a big no-no.
4073 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
4075 struct scsi_qla_host *ha = to_qla_host(host);
4076 struct ddb_entry *ddb_entry = cmd->device->hostdata;
4077 struct iscsi_cls_session *sess = ddb_entry->sess;
4078 struct srb *srb;
4079 int rval;
4081 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4082 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
4083 cmd->result = DID_NO_CONNECT << 16;
4084 else
4085 cmd->result = DID_REQUEUE << 16;
4086 goto qc_fail_command;
4089 if (!sess) {
4090 cmd->result = DID_IMM_RETRY << 16;
4091 goto qc_fail_command;
4094 rval = iscsi_session_chkready(sess);
4095 if (rval) {
4096 cmd->result = rval;
4097 goto qc_fail_command;
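/*
 * Return the command to the midlayer as host-busy while a reset is in
 * progress or the adapter/link is not in a usable state.
 */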
4100 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4101 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4102 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4103 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4104 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4105 !test_bit(AF_ONLINE, &ha->flags) ||
4106 !test_bit(AF_LINK_UP, &ha->flags) ||
4107 test_bit(AF_LOOPBACK, &ha->flags) ||
4108 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
4109 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
4110 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
4111 goto qc_host_busy;
4113 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
4114 if (!srb)
4115 goto qc_host_busy;
4117 rval = qla4xxx_send_command_to_isp(ha, srb);
4118 if (rval != QLA_SUCCESS)
4119 goto qc_host_busy_free_sp;
4121 return 0;
4123 qc_host_busy_free_sp:
4124 qla4xxx_srb_free_dma(ha, srb);
4125 mempool_free(srb, ha->srb_mempool);
4127 qc_host_busy:
4128 return SCSI_MLQUEUE_HOST_BUSY;
4130 qc_fail_command:
4131 cmd->scsi_done(cmd);
4133 return 0;
4137 * qla4xxx_mem_free - frees memory allocated to adapter
4138 * @ha: Pointer to host adapter structure.
4140 * Frees memory previously allocated by qla4xxx_mem_alloc
4142 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
4144 if (ha->queues)
4145 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
4146 ha->queues_dma);
4148 if (ha->fw_dump)
4149 vfree(ha->fw_dump);
4151 ha->queues_len = 0;
4152 ha->queues = NULL;
4153 ha->queues_dma = 0;
4154 ha->request_ring = NULL;
4155 ha->request_dma = 0;
4156 ha->response_ring = NULL;
4157 ha->response_dma = 0;
4158 ha->shadow_regs = NULL;
4159 ha->shadow_regs_dma = 0;
4160 ha->fw_dump = NULL;
4161 ha->fw_dump_size = 0;
4163 /* Free srb pool. */
4164 mempool_destroy(ha->srb_mempool);
4165 ha->srb_mempool = NULL;
4167 dma_pool_destroy(ha->chap_dma_pool);
4169 if (ha->chap_list)
4170 vfree(ha->chap_list);
4171 ha->chap_list = NULL;
4173 dma_pool_destroy(ha->fw_ddb_dma_pool);
4175 /* release io space registers */
4176 if (is_qla8022(ha)) {
4177 if (ha->nx_pcibase)
4178 iounmap(
4179 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
4180 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4181 if (ha->nx_pcibase)
4182 iounmap(
4183 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
4184 } else if (ha->reg) {
4185 iounmap(ha->reg);
4188 if (ha->reset_tmplt.buff)
4189 vfree(ha->reset_tmplt.buff);
4191 pci_release_regions(ha->pdev);
4195 * qla4xxx_mem_alloc - allocates memory for use by adapter.
4196 * @ha: Pointer to host adapter structure
4198 * Allocates DMA memory for request and response queues. Also allocates memory
4199 * for srbs.
4201 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4203 unsigned long align;
4205 /* Allocate contiguous block of DMA memory for queues. */
4206 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4207 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
4208 sizeof(struct shadow_regs) +
4209 MEM_ALIGN_VALUE +
4210 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4211 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4212 &ha->queues_dma, GFP_KERNEL);
4213 if (ha->queues == NULL) {
4214 ql4_printk(KERN_WARNING, ha,
4215 "Memory Allocation failed - queues.\n");
4217 goto mem_alloc_error_exit;
4221 * As per RISC alignment requirements -- the bus-address must be a
4222 * multiple of the request-ring size (in bytes).
4224 align = 0;
4225 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
4226 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
4227 (MEM_ALIGN_VALUE - 1));
4229 /* Update request and response queue pointers. */
4230 ha->request_dma = ha->queues_dma + align;
4231 ha->request_ring = (struct queue_entry *) (ha->queues + align);
4232 ha->response_dma = ha->queues_dma + align +
4233 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
4234 ha->response_ring = (struct queue_entry *) (ha->queues + align +
4235 (REQUEST_QUEUE_DEPTH *
4236 QUEUE_SIZE));
4237 ha->shadow_regs_dma = ha->queues_dma + align +
4238 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4239 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
4240 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
4241 (REQUEST_QUEUE_DEPTH *
4242 QUEUE_SIZE) +
4243 (RESPONSE_QUEUE_DEPTH *
4244 QUEUE_SIZE));
4246 /* Allocate memory for srb pool. */
4247 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
4248 mempool_free_slab, srb_cachep);
4249 if (ha->srb_mempool == NULL) {
4250 ql4_printk(KERN_WARNING, ha,
4251 "Memory Allocation failed - SRB Pool.\n");
4253 goto mem_alloc_error_exit;
4256 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
4257 CHAP_DMA_BLOCK_SIZE, 8, 0);
4259 if (ha->chap_dma_pool == NULL) {
4260 ql4_printk(KERN_WARNING, ha,
4261 "%s: chap_dma_pool allocation failed..\n", __func__);
4262 goto mem_alloc_error_exit;
4265 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
4266 DDB_DMA_BLOCK_SIZE, 8, 0);
4268 if (ha->fw_ddb_dma_pool == NULL) {
4269 ql4_printk(KERN_WARNING, ha,
4270 "%s: fw_ddb_dma_pool allocation failed..\n",
4271 __func__);
4272 goto mem_alloc_error_exit;
4275 return QLA_SUCCESS;
4277 mem_alloc_error_exit:
4278 return QLA_ERROR;
4282 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
4283 * @ha: adapter block pointer.
4285 * Note: The caller should not hold the idc lock.
4287 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
4289 uint32_t temp, temp_state, temp_val;
4290 int status = QLA_SUCCESS;
4292 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
4294 temp_state = qla82xx_get_temp_state(temp);
4295 temp_val = qla82xx_get_temp_val(temp);
4297 if (temp_state == QLA82XX_TEMP_PANIC) {
4298 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
4299 " exceeds maximum allowed. Hardware has been shut"
4300 " down.\n", temp_val);
4301 status = QLA_ERROR;
4302 } else if (temp_state == QLA82XX_TEMP_WARN) {
4303 if (ha->temperature == QLA82XX_TEMP_NORMAL)
4304 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
4305 " degrees C exceeds operating range."
4306 " Immediate action needed.\n", temp_val);
4307 } else {
4308 if (ha->temperature == QLA82XX_TEMP_WARN)
4309 ql4_printk(KERN_INFO, ha, "Device temperature is"
4310 " now %d degrees C in normal range.\n",
4311 temp_val);
4313 ha->temperature = temp_state;
4314 return status;
4318 * qla4_8xxx_check_fw_alive - Check firmware health
4319 * @ha: Pointer to host adapter structure.
4321 * Context: Interrupt
4323 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
4325 uint32_t fw_heartbeat_counter;
4326 int status = QLA_SUCCESS;
4328 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
4329 QLA8XXX_PEG_ALIVE_COUNTER);
4330 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
4331 if (fw_heartbeat_counter == 0xffffffff) {
4332 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
4333 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
4334 ha->host_no, __func__));
4335 return status;
4338 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
4339 ha->seconds_since_last_heartbeat++;
4340 /* FW not alive after 2 seconds */
4341 if (ha->seconds_since_last_heartbeat == 2) {
4342 ha->seconds_since_last_heartbeat = 0;
4343 qla4_8xxx_dump_peg_reg(ha);
4344 status = QLA_ERROR;
4346 } else
4347 ha->seconds_since_last_heartbeat = 0;
4349 ha->fw_heartbeat_counter = fw_heartbeat_counter;
4350 return status;
4353 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
4355 uint32_t halt_status;
4356 int halt_status_unrecoverable = 0;
4358 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
4360 if (is_qla8022(ha)) {
4361 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4362 __func__);
4363 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4364 CRB_NIU_XG_PAUSE_CTL_P0 |
4365 CRB_NIU_XG_PAUSE_CTL_P1);
4367 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
4368 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
4369 __func__);
4370 if (halt_status & HALT_STATUS_UNRECOVERABLE)
4371 halt_status_unrecoverable = 1;
4372 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4373 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
4374 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
4375 __func__);
4376 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
4377 halt_status_unrecoverable = 1;
4381 * Since we cannot change dev_state in interrupt context,
4382 * set the appropriate DPC flag and then wake up the DPC.
4384 if (halt_status_unrecoverable) {
4385 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4386 } else {
4387 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
4388 __func__);
4389 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4391 qla4xxx_mailbox_premature_completion(ha);
4392 qla4xxx_wake_dpc(ha);
4396 * qla4_8xxx_watchdog - Poll dev state
4397 * @ha: Pointer to host adapter structure.
4399 * Context: Interrupt
4401 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
4403 uint32_t dev_state;
4404 uint32_t idc_ctrl;
4406 if (is_qla8032(ha) &&
4407 (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
4408 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
4409 __func__, ha->func_num);
4411 /* don't poll if reset is going on */
4412 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4413 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4414 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
4415 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
4417 if (qla4_8xxx_check_temp(ha)) {
4418 if (is_qla8022(ha)) {
4419 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
4420 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4421 CRB_NIU_XG_PAUSE_CTL_P0 |
4422 CRB_NIU_XG_PAUSE_CTL_P1);
4424 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4425 qla4xxx_wake_dpc(ha);
4426 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
4427 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
4429 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
4430 __func__);
4432 if (is_qla8032(ha) || is_qla8042(ha)) {
4433 idc_ctrl = qla4_83xx_rd_reg(ha,
4434 QLA83XX_IDC_DRV_CTRL);
4435 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
4436 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
4437 __func__);
4438 qla4xxx_mailbox_premature_completion(
4439 ha);
4443 if ((is_qla8032(ha) || is_qla8042(ha)) ||
4444 (is_qla8022(ha) && !ql4xdontresethba)) {
4445 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4446 qla4xxx_wake_dpc(ha);
4448 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
4449 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
4450 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
4451 __func__);
4452 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
4453 qla4xxx_wake_dpc(ha);
4454 } else {
4455 /* Check firmware health */
4456 if (qla4_8xxx_check_fw_alive(ha))
4457 qla4_8xxx_process_fw_error(ha);
4462 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4464 struct iscsi_session *sess;
4465 struct ddb_entry *ddb_entry;
4466 struct scsi_qla_host *ha;
4468 sess = cls_sess->dd_data;
4469 ddb_entry = sess->dd_data;
4470 ha = ddb_entry->ha;
4472 if (!(ddb_entry->ddb_type == FLASH_DDB))
4473 return;
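/*
 * For offline flash DDBs, count the per-second retry_relogin_timer down;
 * when it expires, flag the DDB for relogin and wake the DPC to do it.
 */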
4475 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
4476 !iscsi_is_session_online(cls_sess)) {
4477 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
4478 INVALID_ENTRY) {
4479 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
4480 0) {
4481 atomic_set(&ddb_entry->retry_relogin_timer,
4482 INVALID_ENTRY);
4483 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4484 set_bit(DF_RELOGIN, &ddb_entry->flags);
4485 DEBUG2(ql4_printk(KERN_INFO, ha,
4486 "%s: index [%d] login device\n",
4487 __func__, ddb_entry->fw_ddb_index));
4488 } else
4489 atomic_dec(&ddb_entry->retry_relogin_timer);
4493 /* Wait for relogin to timeout */
4494 if (atomic_read(&ddb_entry->relogin_timer) &&
4495 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
4497 * If the relogin times out and the device is
4498 * still NOT ONLINE then try and relogin again.
4500 if (!iscsi_is_session_online(cls_sess)) {
4501 /* Reset retry relogin timer */
4502 atomic_inc(&ddb_entry->relogin_retry_count);
4503 DEBUG2(ql4_printk(KERN_INFO, ha,
4504 "%s: index[%d] relogin timed out-retrying"
4505 " relogin (%d), retry (%d)\n", __func__,
4506 ddb_entry->fw_ddb_index,
4507 atomic_read(&ddb_entry->relogin_retry_count),
4508 ddb_entry->default_time2wait + 4));
4509 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4510 atomic_set(&ddb_entry->retry_relogin_timer,
4511 ddb_entry->default_time2wait + 4);
4517 * qla4xxx_timer - checks every second for work to do.
4518 * @ha: Pointer to host adapter structure.
4520 static void qla4xxx_timer(struct timer_list *t)
4522 struct scsi_qla_host *ha = from_timer(ha, t, timer);
4523 int start_dpc = 0;
4524 uint16_t w;
4526 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
4528 /* If we are in the middle of AER/EEH processing
4529 * skip any processing and reschedule the timer
4531 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4532 mod_timer(&ha->timer, jiffies + HZ);
4533 return;
4536 /* Hardware read to trigger an EEH error during mailbox waits. */
4537 if (!pci_channel_offline(ha->pdev))
4538 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
4540 if (is_qla80XX(ha))
4541 qla4_8xxx_watchdog(ha);
4543 if (is_qla40XX(ha)) {
4544 /* Check for heartbeat interval. */
4545 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
4546 ha->heartbeat_interval != 0) {
4547 ha->seconds_since_last_heartbeat++;
4548 if (ha->seconds_since_last_heartbeat >
4549 ha->heartbeat_interval + 2)
4550 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4554 /* Process any deferred work. */
4555 if (!list_empty(&ha->work_list))
4556 start_dpc++;
4558 /* Wakeup the dpc routine for this adapter, if needed. */
4559 if (start_dpc ||
4560 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4561 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
4562 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
4563 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
4564 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4565 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
4566 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
4567 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4568 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4569 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
4570 test_bit(DPC_AEN, &ha->dpc_flags)) {
4571 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
4572 " - dpc flags = 0x%lx\n",
4573 ha->host_no, __func__, ha->dpc_flags));
4574 qla4xxx_wake_dpc(ha);
4577 /* Reschedule timer thread to call us back in one second */
4578 mod_timer(&ha->timer, jiffies + HZ);
4580 DEBUG2(ha->seconds_since_last_intr++);
4584 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
4585 * @ha: Pointer to host adapter structure.
4587 * This routine stalls the driver until all outstanding commands are returned.
4588 * Caller must release the Hardware Lock prior to calling this routine.
4590 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
4592 uint32_t index = 0;
4593 unsigned long flags;
4594 struct scsi_cmnd *cmd;
4595 unsigned long wtime;
4596 uint32_t wtmo;
4598 if (is_qla40XX(ha))
4599 wtmo = WAIT_CMD_TOV;
4600 else
4601 wtmo = ha->nx_reset_timeout / 2;
4603 wtime = jiffies + (wtmo * HZ);
4605 DEBUG2(ql4_printk(KERN_INFO, ha,
4606 "Wait up to %u seconds for cmds to complete\n",
4607 wtmo));
4609 while (!time_after_eq(jiffies, wtime)) {
4610 spin_lock_irqsave(&ha->hardware_lock, flags);
4611 /* Find a command that hasn't completed. */
4612 for (index = 0; index < ha->host->can_queue; index++) {
4613 cmd = scsi_host_find_tag(ha->host, index);
4615 * We cannot just check if the index is valid,
4616 * because if we are run from the SCSI EH, then
4617 * the scsi/block layer is going to prevent
4618 * the tag from being released.
4620 if (cmd != NULL && CMD_SP(cmd))
4621 break;
4623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4625 /* If No Commands are pending, wait is complete */
4626 if (index == ha->host->can_queue)
4627 return QLA_SUCCESS;
4629 msleep(1000);
4631 /* If we timed out waiting for commands to complete,
4632 * return ERROR. */
4633 return QLA_ERROR;
4636 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
4638 uint32_t ctrl_status;
4639 unsigned long flags = 0;
4641 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
4643 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
4644 return QLA_ERROR;
4646 spin_lock_irqsave(&ha->hardware_lock, flags);
4649 * If the SCSI Reset Interrupt bit is set, clear it.
4650 * Otherwise, the Soft Reset won't work.
4652 ctrl_status = readw(&ha->reg->ctrl_status);
4653 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
4654 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4656 /* Issue Soft Reset */
4657 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
4658 readl(&ha->reg->ctrl_status);
4660 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4661 return QLA_SUCCESS;
4665 * qla4xxx_soft_reset - performs soft reset.
4666 * @ha: Pointer to host adapter structure.
4668 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
4670 uint32_t max_wait_time;
4671 unsigned long flags = 0;
4672 int status;
4673 uint32_t ctrl_status;
4675 status = qla4xxx_hw_reset(ha);
4676 if (status != QLA_SUCCESS)
4677 return status;
4679 status = QLA_ERROR;
4680 /* Wait until the Network Reset Intr bit is cleared */
4681 max_wait_time = RESET_INTR_TOV;
4682 do {
4683 spin_lock_irqsave(&ha->hardware_lock, flags);
4684 ctrl_status = readw(&ha->reg->ctrl_status);
4685 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4687 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
4688 break;
4690 msleep(1000);
4691 } while ((--max_wait_time));
4693 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
4694 DEBUG2(printk(KERN_WARNING
4695 "scsi%ld: Network Reset Intr not cleared by "
4696 "Network function, clearing it now!\n",
4697 ha->host_no));
4698 spin_lock_irqsave(&ha->hardware_lock, flags);
4699 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
4700 readl(&ha->reg->ctrl_status);
4701 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4704 /* Wait until the firmware tells us the Soft Reset is done */
4705 max_wait_time = SOFT_RESET_TOV;
4706 do {
4707 spin_lock_irqsave(&ha->hardware_lock, flags);
4708 ctrl_status = readw(&ha->reg->ctrl_status);
4709 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4711 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
4712 status = QLA_SUCCESS;
4713 break;
4716 msleep(1000);
4717 } while ((--max_wait_time));
4720 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
4721 * after the soft reset has taken place.
4723 spin_lock_irqsave(&ha->hardware_lock, flags);
4724 ctrl_status = readw(&ha->reg->ctrl_status);
4725 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
4726 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4727 readl(&ha->reg->ctrl_status);
4729 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4731 /* If the soft reset fails, the BIOS on the other
4732 * function is most likely also enabled.
4733 * Since initialization is sequential, the other function
4734 * won't be able to acknowledge the soft reset.
4735 * Issue a force soft reset to work around this scenario.
4737 if (max_wait_time == 0) {
4738 /* Issue Force Soft Reset */
4739 spin_lock_irqsave(&ha->hardware_lock, flags);
4740 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
4741 readl(&ha->reg->ctrl_status);
4742 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4743 /* Wait until the firmware tells us the Soft Reset is done */
4744 max_wait_time = SOFT_RESET_TOV;
4745 do {
4746 spin_lock_irqsave(&ha->hardware_lock, flags);
4747 ctrl_status = readw(&ha->reg->ctrl_status);
4748 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4750 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
4751 status = QLA_SUCCESS;
4752 break;
4755 msleep(1000);
4756 } while ((--max_wait_time));
4759 return status;
4763 * qla4xxx_abort_active_cmds - returns all outstanding I/O requests to the OS
4764 * @ha: Pointer to host adapter structure.
4765 * @res: returned scsi status
4767 * This routine is called just prior to a HARD RESET to return all
4768 * outstanding commands back to the Operating System.
4769 * The caller should make sure that the following locks are released
4770 * before calling this routine: hardware lock and io_request_lock.
4772 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
4774 struct srb *srb;
4775 int i;
4776 unsigned long flags;
4778 spin_lock_irqsave(&ha->hardware_lock, flags);
4779 for (i = 0; i < ha->host->can_queue; i++) {
4780 srb = qla4xxx_del_from_active_array(ha, i);
4781 if (srb != NULL) {
4782 srb->cmd->result = res;
4783 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
4786 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4789 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
4791 clear_bit(AF_ONLINE, &ha->flags);
4793 /* Disable the board */
4794 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
4796 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4797 qla4xxx_mark_all_devices_missing(ha);
4798 clear_bit(AF_INIT_DONE, &ha->flags);
4801 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
4803 struct iscsi_session *sess;
4804 struct ddb_entry *ddb_entry;
4806 sess = cls_session->dd_data;
4807 ddb_entry = sess->dd_data;
4808 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
4810 if (ddb_entry->ddb_type == FLASH_DDB)
4811 iscsi_block_session(ddb_entry->sess);
4812 else
4813 iscsi_session_failure(cls_session->dd_data,
4814 ISCSI_ERR_CONN_FAILED);
4818 * qla4xxx_recover_adapter - recovers adapter after a fatal error
4819 * @ha: Pointer to host adapter structure.
4821 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
4823 int status = QLA_ERROR;
4824 uint8_t reset_chip = 0;
4825 uint32_t dev_state;
4826 unsigned long wait;
4828 /* Stall incoming I/O until we are done */
4829 scsi_block_requests(ha->host);
4830 clear_bit(AF_ONLINE, &ha->flags);
4831 clear_bit(AF_LINK_UP, &ha->flags);
4833 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
4835 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4837 if ((is_qla8032(ha) || is_qla8042(ha)) &&
4838 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4839 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4840 __func__);
4841 /* disable pause frame for ISP83xx */
4842 qla4_83xx_disable_pause(ha);
4845 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4847 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
4848 reset_chip = 1;
4850 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
4851 * do not reset adapter, jump to initialize_adapter */
4852 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4853 status = QLA_SUCCESS;
4854 goto recover_ha_init_adapter;
4857 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
4858 * from eh_host_reset or ioctl module */
4859 if (is_qla80XX(ha) && !reset_chip &&
4860 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4862 DEBUG2(ql4_printk(KERN_INFO, ha,
4863 "scsi%ld: %s - Performing stop_firmware...\n",
4864 ha->host_no, __func__));
4865 status = ha->isp_ops->reset_firmware(ha);
4866 if (status == QLA_SUCCESS) {
4867 ha->isp_ops->disable_intrs(ha);
4868 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4869 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4870 } else {
4871 /* If the stop_firmware fails then
4872 * reset the entire chip */
4873 reset_chip = 1;
4874 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
4875 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4879 /* Issue full chip reset if recovering from a catastrophic error,
4880 * or if stop_firmware fails for ISP-8xxx.
4881 * This is the default case for ISP-4xxx */
4882 if (is_qla40XX(ha) || reset_chip) {
4883 if (is_qla40XX(ha))
4884 goto chip_reset;
4886 /* Check if 8XXX firmware is alive or not
4887 * We may have arrived here from NEED_RESET
4888 * detection only */
4889 if (test_bit(AF_FW_RECOVERY, &ha->flags))
4890 goto chip_reset;
4892 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
4893 while (time_before(jiffies, wait)) {
4894 if (qla4_8xxx_check_fw_alive(ha)) {
4895 qla4xxx_mailbox_premature_completion(ha);
4896 break;
4899 set_current_state(TASK_UNINTERRUPTIBLE);
4900 schedule_timeout(HZ);
4902 chip_reset:
4903 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
4904 qla4xxx_cmd_wait(ha);
4906 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4907 DEBUG2(ql4_printk(KERN_INFO, ha,
4908 "scsi%ld: %s - Performing chip reset..\n",
4909 ha->host_no, __func__));
4910 status = ha->isp_ops->reset_chip(ha);
4911 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4914 /* Flush any pending ddb changed AENs */
4915 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4917 recover_ha_init_adapter:
4918 /* Upon successful firmware/chip reset, re-initialize the adapter */
4919 if (status == QLA_SUCCESS) {
4920 /* For ISP-4xxx, force function 1 to always initialize
4921 * before function 3 to prevent both functions from
4922 * stepping on top of each other */
4923 if (is_qla40XX(ha) && (ha->mac_index == 3))
4924 ssleep(6);
4926 /* NOTE: AF_ONLINE flag set upon successful completion of
4927 * qla4xxx_initialize_adapter */
4928 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4929 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
4930 status = qla4_8xxx_check_init_adapter_retry(ha);
4931 if (status == QLA_ERROR) {
4932 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
4933 ha->host_no, __func__);
4934 qla4xxx_dead_adapter_cleanup(ha);
4935 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4936 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4937 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4938 &ha->dpc_flags);
4939 goto exit_recover;
4944 /* Retry failed adapter initialization, if necessary
4945 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
4946 * case to prevent ping-pong resets between functions */
4947 if (!test_bit(AF_ONLINE, &ha->flags) &&
4948 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4949 /* Adapter initialization failed, see if we can retry
4950 * resetting the ha.
4951 * Since we don't want to block the DPC for too long
4952 * with multiple resets in the same thread,
4953 * utilize DPC to retry */
4954 if (is_qla80XX(ha)) {
4955 ha->isp_ops->idc_lock(ha);
4956 dev_state = qla4_8xxx_rd_direct(ha,
4957 QLA8XXX_CRB_DEV_STATE);
4958 ha->isp_ops->idc_unlock(ha);
4959 if (dev_state == QLA8XXX_DEV_FAILED) {
4960 ql4_printk(KERN_INFO, ha, "%s: don't retry "
4961 "recover adapter. H/W is in Failed "
4962 "state\n", __func__);
4963 qla4xxx_dead_adapter_cleanup(ha);
4964 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4965 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4966 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4967 &ha->dpc_flags);
4968 status = QLA_ERROR;
4970 goto exit_recover;
4974 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
4975 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
4976 DEBUG2(printk("scsi%ld: recover adapter - retrying "
4977 "(%d) more times\n", ha->host_no,
4978 ha->retry_reset_ha_cnt));
4979 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4980 status = QLA_ERROR;
4981 } else {
4982 if (ha->retry_reset_ha_cnt > 0) {
4983 /* Schedule another Reset HA--DPC will retry */
4984 ha->retry_reset_ha_cnt--;
4985 DEBUG2(printk("scsi%ld: recover adapter - "
4986 "retry remaining %d\n",
4987 ha->host_no,
4988 ha->retry_reset_ha_cnt));
4989 status = QLA_ERROR;
4992 if (ha->retry_reset_ha_cnt == 0) {
4993 /* Recover adapter retries have been exhausted.
4994 * Adapter DEAD */
4995 DEBUG2(printk("scsi%ld: recover adapter "
4996 "failed - board disabled\n",
4997 ha->host_no));
4998 qla4xxx_dead_adapter_cleanup(ha);
4999 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5000 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5001 clear_bit(DPC_RESET_HA_FW_CONTEXT,
5002 &ha->dpc_flags);
5003 status = QLA_ERROR;
5006 } else {
5007 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5008 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5009 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5012 exit_recover:
5013 ha->adapter_error_count++;
5015 if (test_bit(AF_ONLINE, &ha->flags))
5016 ha->isp_ops->enable_intrs(ha);
5018 scsi_unblock_requests(ha->host);
5020 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5021 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
5022 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
5024 return status;
5027 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
5029 struct iscsi_session *sess;
5030 struct ddb_entry *ddb_entry;
5031 struct scsi_qla_host *ha;
5033 sess = cls_session->dd_data;
5034 ddb_entry = sess->dd_data;
5035 ha = ddb_entry->ha;
5036 if (!iscsi_is_session_online(cls_session)) {
5037 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
5038 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5039 " unblock session\n", ha->host_no, __func__,
5040 ddb_entry->fw_ddb_index);
5041 iscsi_unblock_session(ddb_entry->sess);
5042 } else {
5043 /* Trigger relogin */
5044 if (ddb_entry->ddb_type == FLASH_DDB) {
5045 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
5046 test_bit(DF_DISABLE_RELOGIN,
5047 &ddb_entry->flags)))
5048 qla4xxx_arm_relogin_timer(ddb_entry);
5049 } else
5050 iscsi_session_failure(cls_session->dd_data,
5051 ISCSI_ERR_CONN_FAILED);
5056 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
5058 struct iscsi_session *sess;
5059 struct ddb_entry *ddb_entry;
5060 struct scsi_qla_host *ha;
5062 sess = cls_session->dd_data;
5063 ddb_entry = sess->dd_data;
5064 ha = ddb_entry->ha;
5065 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5066 " unblock session\n", ha->host_no, __func__,
5067 ddb_entry->fw_ddb_index);
5069 iscsi_unblock_session(ddb_entry->sess);
5071 /* Start scan target */
5072 if (test_bit(AF_ONLINE, &ha->flags)) {
5073 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5074 " start scan\n", ha->host_no, __func__,
5075 ddb_entry->fw_ddb_index);
5076 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
5078 return QLA_SUCCESS;
5081 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
5083 struct iscsi_session *sess;
5084 struct ddb_entry *ddb_entry;
5085 struct scsi_qla_host *ha;
5086 int status = QLA_SUCCESS;
5088 sess = cls_session->dd_data;
5089 ddb_entry = sess->dd_data;
5090 ha = ddb_entry->ha;
5091 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5092 " unblock user space session\n", ha->host_no, __func__,
5093 ddb_entry->fw_ddb_index);
5095 if (!iscsi_is_session_online(cls_session)) {
5096 iscsi_conn_start(ddb_entry->conn);
5097 iscsi_conn_login_event(ddb_entry->conn,
5098 ISCSI_CONN_STATE_LOGGED_IN);
5099 } else {
5100 ql4_printk(KERN_INFO, ha,
5101 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
5102 ha->host_no, __func__, ddb_entry->fw_ddb_index,
5103 cls_session->sid);
5104 status = QLA_ERROR;
5107 return status;
5110 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
5112 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
5115 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
5117 uint16_t relogin_timer;
5118 struct iscsi_session *sess;
5119 struct ddb_entry *ddb_entry;
5120 struct scsi_qla_host *ha;
5122 sess = cls_sess->dd_data;
5123 ddb_entry = sess->dd_data;
5124 ha = ddb_entry->ha;
5126 relogin_timer = max(ddb_entry->default_relogin_timeout,
5127 (uint16_t)RELOGIN_TOV);
5128 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
5130 DEBUG2(ql4_printk(KERN_INFO, ha,
5131 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
5132 ddb_entry->fw_ddb_index, relogin_timer));
5134 qla4xxx_login_flash_ddb(cls_sess);
5137 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
5139 struct iscsi_session *sess;
5140 struct ddb_entry *ddb_entry;
5141 struct scsi_qla_host *ha;
5143 sess = cls_sess->dd_data;
5144 ddb_entry = sess->dd_data;
5145 ha = ddb_entry->ha;
5147 if (!(ddb_entry->ddb_type == FLASH_DDB))
5148 return;
5150 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
5151 return;
5153 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
5154 !iscsi_is_session_online(cls_sess)) {
5155 DEBUG2(ql4_printk(KERN_INFO, ha,
5156 "relogin issued\n"));
5157 qla4xxx_relogin_flash_ddb(cls_sess);
5161 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
5163 if (ha->dpc_thread)
5164 queue_work(ha->dpc_thread, &ha->dpc_work);
5167 static struct qla4_work_evt *
5168 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
5169 enum qla4_work_type type)
5171 struct qla4_work_evt *e;
5172 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
5174 e = kzalloc(size, GFP_ATOMIC);
5175 if (!e)
5176 return NULL;
5178 INIT_LIST_HEAD(&e->list);
5179 e->type = type;
5180 return e;
5183 static void qla4xxx_post_work(struct scsi_qla_host *ha,
5184 struct qla4_work_evt *e)
5186 unsigned long flags;
5188 spin_lock_irqsave(&ha->work_lock, flags);
5189 list_add_tail(&e->list, &ha->work_list);
5190 spin_unlock_irqrestore(&ha->work_lock, flags);
5191 qla4xxx_wake_dpc(ha);
5194 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
5195 enum iscsi_host_event_code aen_code,
5196 uint32_t data_size, uint8_t *data)
5198 struct qla4_work_evt *e;
5200 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
5201 if (!e)
5202 return QLA_ERROR;
5204 e->u.aen.code = aen_code;
5205 e->u.aen.data_size = data_size;
5206 memcpy(e->u.aen.data, data, data_size);
5208 qla4xxx_post_work(ha, e);
5210 return QLA_SUCCESS;
5213 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
5214 uint32_t status, uint32_t pid,
5215 uint32_t data_size, uint8_t *data)
5217 struct qla4_work_evt *e;
5219 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
5220 if (!e)
5221 return QLA_ERROR;
5223 e->u.ping.status = status;
5224 e->u.ping.pid = pid;
5225 e->u.ping.data_size = data_size;
5226 memcpy(e->u.ping.data, data, data_size);
5228 qla4xxx_post_work(ha, e);
5230 return QLA_SUCCESS;
5233 static void qla4xxx_do_work(struct scsi_qla_host *ha)
5235 struct qla4_work_evt *e, *tmp;
5236 unsigned long flags;
5237 LIST_HEAD(work);
5239 spin_lock_irqsave(&ha->work_lock, flags);
5240 list_splice_init(&ha->work_list, &work);
5241 spin_unlock_irqrestore(&ha->work_lock, flags);
5243 list_for_each_entry_safe(e, tmp, &work, list) {
5244 list_del_init(&e->list);
5246 switch (e->type) {
5247 case QLA4_EVENT_AEN:
5248 iscsi_post_host_event(ha->host_no,
5249 &qla4xxx_iscsi_transport,
5250 e->u.aen.code,
5251 e->u.aen.data_size,
5252 e->u.aen.data);
5253 break;
5254 case QLA4_EVENT_PING_STATUS:
5255 iscsi_ping_comp_event(ha->host_no,
5256 &qla4xxx_iscsi_transport,
5257 e->u.ping.status,
5258 e->u.ping.pid,
5259 e->u.ping.data_size,
5260 e->u.ping.data);
5261 break;
5262 default:
5263 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
5264 "supported", e->type);
5266 kfree(e);
5271 * qla4xxx_do_dpc - dpc routine
5272 * @data: in our case pointer to adapter structure
5274 * This routine is a task that is scheduled by the interrupt handler
5275 * to perform the background processing for interrupts. We put it
5276 * on a task queue that is consumed whenever the scheduler runs; that's
5277 * so you can do anything (e.g., put the process to sleep). In fact,
5278 * the mid-level tries to sleep when it reaches the driver threshold
5279 * "host->can_queue". This can cause a panic if we were in our interrupt code.
5281 static void qla4xxx_do_dpc(struct work_struct *work)
5283 struct scsi_qla_host *ha =
5284 container_of(work, struct scsi_qla_host, dpc_work);
5285 int status = QLA_ERROR;
5287 DEBUG2(ql4_printk(KERN_INFO, ha,
5288 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
5289 ha->host_no, __func__, ha->flags, ha->dpc_flags));
5291 /* Initialization not yet finished. Don't do anything yet. */
5292 if (!test_bit(AF_INIT_DONE, &ha->flags))
5293 return;
5295 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
5296 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
5297 ha->host_no, __func__, ha->flags));
5298 return;
5301 /* post events to application */
5302 qla4xxx_do_work(ha);
5304 if (is_qla80XX(ha)) {
5305 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
5306 if (is_qla8032(ha) || is_qla8042(ha)) {
5307 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
5308 __func__);
5309 /* disable pause frame for ISP83xx */
5310 qla4_83xx_disable_pause(ha);
5313 ha->isp_ops->idc_lock(ha);
5314 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5315 QLA8XXX_DEV_FAILED);
5316 ha->isp_ops->idc_unlock(ha);
5317 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
5318 qla4_8xxx_device_state_handler(ha);
5321 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
5322 if (is_qla8042(ha)) {
5323 if (ha->idc_info.info2 &
5324 ENABLE_INTERNAL_LOOPBACK) {
5325 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
5326 __func__);
5327 status = qla4_84xx_config_acb(ha,
5328 ACB_CONFIG_DISABLE);
5329 if (status != QLA_SUCCESS) {
5330 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
5331 __func__);
5335 qla4_83xx_post_idc_ack(ha);
5336 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
5339 if (is_qla8042(ha) &&
5340 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
5341 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
5342 __func__);
5343 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
5344 QLA_SUCCESS) {
5345 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
5346 __func__);
5348 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
5351 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
5352 qla4_8xxx_need_qsnt_handler(ha);
5356 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
5357 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
5358 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
5359 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
5360 if ((is_qla8022(ha) && ql4xdontresethba) ||
5361 ((is_qla8032(ha) || is_qla8042(ha)) &&
5362 qla4_83xx_idc_dontreset(ha))) {
5363 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5364 ha->host_no, __func__));
5365 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5366 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5367 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5368 goto dpc_post_reset_ha;
5370 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
5371 test_bit(DPC_RESET_HA, &ha->dpc_flags))
5372 qla4xxx_recover_adapter(ha);
5374 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
5375 uint8_t wait_time = RESET_INTR_TOV;
5377 while ((readw(&ha->reg->ctrl_status) &
5378 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
5379 if (--wait_time == 0)
5380 break;
5381 msleep(1000);
5383 if (wait_time == 0)
5384 DEBUG2(printk("scsi%ld: %s: SR|FSR "
5385 "bit not cleared-- resetting\n",
5386 ha->host_no, __func__));
5387 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5388 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
5389 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
5390 status = qla4xxx_recover_adapter(ha);
5392 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5393 if (status == QLA_SUCCESS)
5394 ha->isp_ops->enable_intrs(ha);
5398 dpc_post_reset_ha:
5399 /* ---- process AEN? --- */
5400 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
5401 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
5403 /* ---- Get DHCP IP Address? --- */
5404 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
5405 qla4xxx_get_dhcp_ip_address(ha);
5407 /* ---- relogin device? --- */
5408 if (adapter_up(ha) &&
5409 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
5410 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
5413 /* ---- link change? --- */
5414 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
5415 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
5416 if (!test_bit(AF_LINK_UP, &ha->flags)) {
5417 /* ---- link down? --- */
5418 qla4xxx_mark_all_devices_missing(ha);
5419 } else {
5420 /* ---- link up? --- *
5421 * F/W will auto login to all devices ONLY ONCE after
5422 * link up during driver initialization and runtime
5423 * fatal error recovery. Therefore, the driver must
5424 * manually relogin to devices when recovering from
5425 * connection failures, logouts, expired KATO, etc. */
5426 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
5427 qla4xxx_build_ddb_list(ha, ha->is_reset);
5428 iscsi_host_for_each_session(ha->host,
5429 qla4xxx_login_flash_ddb);
5430 } else
5431 qla4xxx_relogin_all_devices(ha);
5434 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
5435 if (qla4xxx_sysfs_ddb_export(ha))
5436 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
5437 __func__);
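/*
 * Editor's note (illustrative sketch only, not code from this file): other
 * driver paths hand work to this DPC by setting a flag in ha->dpc_flags and
 * waking the work item. Assuming the existing qla4xxx_wake_dpc() helper, a
 * minimal example would be:
 *
 *	static inline void qla4xxx_request_relogin(struct scsi_qla_host *ha)
 *	{
 *		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
 *		qla4xxx_wake_dpc(ha);	/+ queues ha->dpc_work for qla4xxx_do_dpc +/
 *	}
 *
 * qla4xxx_request_relogin() is a hypothetical name used only for this sketch.
 */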
5442 * qla4xxx_free_adapter - release the adapter
5443 * @ha: pointer to adapter structure
5445 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
5447 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5449 /* Turn-off interrupts on the card. */
5450 ha->isp_ops->disable_intrs(ha);
5452 if (is_qla40XX(ha)) {
5453 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
5454 &ha->reg->ctrl_status);
5455 readl(&ha->reg->ctrl_status);
5456 } else if (is_qla8022(ha)) {
5457 writel(0, &ha->qla4_82xx_reg->host_int);
5458 readl(&ha->qla4_82xx_reg->host_int);
5459 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5460 writel(0, &ha->qla4_83xx_reg->risc_intr);
5461 readl(&ha->qla4_83xx_reg->risc_intr);
5464 /* Remove timer thread, if present */
5465 if (ha->timer_active)
5466 qla4xxx_stop_timer(ha);
5468 /* Kill the kernel thread for this host */
5469 if (ha->dpc_thread)
5470 destroy_workqueue(ha->dpc_thread);
5472 /* Kill the kernel thread for this host */
5473 if (ha->task_wq)
5474 destroy_workqueue(ha->task_wq);
5476 /* Put firmware in known state */
5477 ha->isp_ops->reset_firmware(ha);
5479 if (is_qla80XX(ha)) {
5480 ha->isp_ops->idc_lock(ha);
5481 qla4_8xxx_clear_drv_active(ha);
5482 ha->isp_ops->idc_unlock(ha);
5485 /* Detach interrupts */
5486 qla4xxx_free_irqs(ha);
5488 /* free extra memory */
5489 qla4xxx_mem_free(ha);
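/*
 * Teardown order used above: fail any outstanding commands, mask interrupts
 * at the chip, stop the adapter timer, destroy the DPC and task workqueues,
 * put the firmware in a known state, clear the drv_active bit on ISP8xxx
 * parts, free the IRQs and finally release driver memory.
 */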
5492 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
5494 int status = 0;
5495 unsigned long mem_base, mem_len, db_base, db_len;
5496 struct pci_dev *pdev = ha->pdev;
5498 status = pci_request_regions(pdev, DRIVER_NAME);
5499 if (status) {
5500 printk(KERN_WARNING
5501 "scsi(%ld) Failed to reserve PIO regions (%s) "
5502 "status=%d\n", ha->host_no, pci_name(pdev), status);
5503 goto iospace_error_exit;
5506 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
5507 __func__, pdev->revision));
5508 ha->revision_id = pdev->revision;
5510 /* remap phys address */
5511 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
5512 mem_len = pci_resource_len(pdev, 0);
5513 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
5514 __func__, mem_base, mem_len));
5516 /* mapping of pcibase pointer */
5517 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
5518 if (!ha->nx_pcibase) {
5519 printk(KERN_ERR
5520 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
5521 pci_release_regions(ha->pdev);
5522 goto iospace_error_exit;
5525 /* Mapping of IO base pointer, door bell read and write pointer */
5527 /* mapping of IO base pointer */
5528 if (is_qla8022(ha)) {
5529 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
5530 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
5531 (ha->pdev->devfn << 11));
5532 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
5533 QLA82XX_CAM_RAM_DB2);
5534 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5535 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
5536 ((uint8_t *)ha->nx_pcibase);
5539 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
5540 db_len = pci_resource_len(pdev, 4);
5542 return 0;
5543 iospace_error_exit:
5544 return -ENOMEM;
5547 /***
5548 * qla4xxx_iospace_config - maps registers
5549 * @ha: pointer to adapter structure
5551 * This routine maps the HBA's registers from the PCI address space
5552 * into the kernel virtual address space for memory-mapped I/O.
5554 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
5556 unsigned long pio, pio_len, pio_flags;
5557 unsigned long mmio, mmio_len, mmio_flags;
5559 pio = pci_resource_start(ha->pdev, 0);
5560 pio_len = pci_resource_len(ha->pdev, 0);
5561 pio_flags = pci_resource_flags(ha->pdev, 0);
5562 if (pio_flags & IORESOURCE_IO) {
5563 if (pio_len < MIN_IOBASE_LEN) {
5564 ql4_printk(KERN_WARNING, ha,
5565 "Invalid PCI I/O region size\n");
5566 pio = 0;
5568 } else {
5569 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
5570 pio = 0;
5573 /* Use MMIO operations for all accesses. */
5574 mmio = pci_resource_start(ha->pdev, 1);
5575 mmio_len = pci_resource_len(ha->pdev, 1);
5576 mmio_flags = pci_resource_flags(ha->pdev, 1);
5578 if (!(mmio_flags & IORESOURCE_MEM)) {
5579 ql4_printk(KERN_ERR, ha,
5580 "region #0 not an MMIO resource, aborting\n");
5582 goto iospace_error_exit;
5585 if (mmio_len < MIN_IOBASE_LEN) {
5586 ql4_printk(KERN_ERR, ha,
5587 "Invalid PCI mem region size, aborting\n");
5588 goto iospace_error_exit;
5591 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
5592 ql4_printk(KERN_WARNING, ha,
5593 "Failed to reserve PIO/MMIO regions\n");
5595 goto iospace_error_exit;
5598 ha->pio_address = pio;
5599 ha->pio_length = pio_len;
5600 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
5601 if (!ha->reg) {
5602 ql4_printk(KERN_ERR, ha,
5603 "cannot remap MMIO, aborting\n");
5605 goto iospace_error_exit;
5608 return 0;
5610 iospace_error_exit:
5611 return -ENOMEM;
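/*
 * Editor's note (illustrative sketch, assumptions not taken from this file):
 * the probe path is expected to pick the isp_ops table for the detected chip
 * and then map the register space through it, roughly:
 *
 *	ha->isp_ops = &qla4xxx_isp_ops;		/+ or the 82xx/83xx variants +/
 *	if (ha->isp_ops->iospace_config(ha))	/+ non-zero (-ENOMEM) on failure +/
 *		goto probe_failed;		/+ hypothetical label +/
 */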
5614 static struct isp_operations qla4xxx_isp_ops = {
5615 .iospace_config = qla4xxx_iospace_config,
5616 .pci_config = qla4xxx_pci_config,
5617 .disable_intrs = qla4xxx_disable_intrs,
5618 .enable_intrs = qla4xxx_enable_intrs,
5619 .start_firmware = qla4xxx_start_firmware,
5620 .intr_handler = qla4xxx_intr_handler,
5621 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
5622 .reset_chip = qla4xxx_soft_reset,
5623 .reset_firmware = qla4xxx_hw_reset,
5624 .queue_iocb = qla4xxx_queue_iocb,
5625 .complete_iocb = qla4xxx_complete_iocb,
5626 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5627 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5628 .get_sys_info = qla4xxx_get_sys_info,
5629 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
5630 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
5633 static struct isp_operations qla4_82xx_isp_ops = {
5634 .iospace_config = qla4_8xxx_iospace_config,
5635 .pci_config = qla4_8xxx_pci_config,
5636 .disable_intrs = qla4_82xx_disable_intrs,
5637 .enable_intrs = qla4_82xx_enable_intrs,
5638 .start_firmware = qla4_8xxx_load_risc,
5639 .restart_firmware = qla4_82xx_try_start_fw,
5640 .intr_handler = qla4_82xx_intr_handler,
5641 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
5642 .need_reset = qla4_8xxx_need_reset,
5643 .reset_chip = qla4_82xx_isp_reset,
5644 .reset_firmware = qla4_8xxx_stop_firmware,
5645 .queue_iocb = qla4_82xx_queue_iocb,
5646 .complete_iocb = qla4_82xx_complete_iocb,
5647 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
5648 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
5649 .get_sys_info = qla4_8xxx_get_sys_info,
5650 .rd_reg_direct = qla4_82xx_rd_32,
5651 .wr_reg_direct = qla4_82xx_wr_32,
5652 .rd_reg_indirect = qla4_82xx_md_rd_32,
5653 .wr_reg_indirect = qla4_82xx_md_wr_32,
5654 .idc_lock = qla4_82xx_idc_lock,
5655 .idc_unlock = qla4_82xx_idc_unlock,
5656 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
5657 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
5658 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
5661 static struct isp_operations qla4_83xx_isp_ops = {
5662 .iospace_config = qla4_8xxx_iospace_config,
5663 .pci_config = qla4_8xxx_pci_config,
5664 .disable_intrs = qla4_83xx_disable_intrs,
5665 .enable_intrs = qla4_83xx_enable_intrs,
5666 .start_firmware = qla4_8xxx_load_risc,
5667 .restart_firmware = qla4_83xx_start_firmware,
5668 .intr_handler = qla4_83xx_intr_handler,
5669 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
5670 .need_reset = qla4_8xxx_need_reset,
5671 .reset_chip = qla4_83xx_isp_reset,
5672 .reset_firmware = qla4_8xxx_stop_firmware,
5673 .queue_iocb = qla4_83xx_queue_iocb,
5674 .complete_iocb = qla4_83xx_complete_iocb,
5675 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5676 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5677 .get_sys_info = qla4_8xxx_get_sys_info,
5678 .rd_reg_direct = qla4_83xx_rd_reg,
5679 .wr_reg_direct = qla4_83xx_wr_reg,
5680 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
5681 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
5682 .idc_lock = qla4_83xx_drv_lock,
5683 .idc_unlock = qla4_83xx_drv_unlock,
5684 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
5685 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
5686 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
5689 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5691 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
5694 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5696 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
5699 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5701 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
5704 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5706 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
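/*
 * The four accessors above hide where the request/response queue pointers
 * live: on ISP40xx the firmware DMAs them into host memory (ha->shadow_regs),
 * while on ISP82xx they are read directly from the memory-mapped register
 * block (ha->qla4_82xx_reg).
 */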
5709 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
5711 struct scsi_qla_host *ha = data;
5712 char *str = buf;
5713 int rc;
5715 switch (type) {
5716 case ISCSI_BOOT_ETH_FLAGS:
5717 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5718 break;
5719 case ISCSI_BOOT_ETH_INDEX:
5720 rc = sprintf(str, "0\n");
5721 break;
5722 case ISCSI_BOOT_ETH_MAC:
5723 rc = sysfs_format_mac(str, ha->my_mac,
5724 MAC_ADDR_LEN);
5725 break;
5726 default:
5727 rc = -ENOSYS;
5728 break;
5730 return rc;
5733 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
5735 int rc;
5737 switch (type) {
5738 case ISCSI_BOOT_ETH_FLAGS:
5739 case ISCSI_BOOT_ETH_MAC:
5740 case ISCSI_BOOT_ETH_INDEX:
5741 rc = S_IRUGO;
5742 break;
5743 default:
5744 rc = 0;
5745 break;
5747 return rc;
5750 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
5752 struct scsi_qla_host *ha = data;
5753 char *str = buf;
5754 int rc;
5756 switch (type) {
5757 case ISCSI_BOOT_INI_INITIATOR_NAME:
5758 rc = sprintf(str, "%s\n", ha->name_string);
5759 break;
5760 default:
5761 rc = -ENOSYS;
5762 break;
5764 return rc;
5767 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
5769 int rc;
5771 switch (type) {
5772 case ISCSI_BOOT_INI_INITIATOR_NAME:
5773 rc = S_IRUGO;
5774 break;
5775 default:
5776 rc = 0;
5777 break;
5779 return rc;
5782 static ssize_t
5783 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
5784 char *buf)
5786 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
5787 char *str = buf;
5788 int rc;
5790 switch (type) {
5791 case ISCSI_BOOT_TGT_NAME:
5792 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
5793 break;
5794 case ISCSI_BOOT_TGT_IP_ADDR:
5795 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
5796 rc = sprintf(buf, "%pI4\n",
5797 &boot_conn->dest_ipaddr.ip_address);
5798 else
5799 rc = sprintf(str, "%pI6\n",
5800 &boot_conn->dest_ipaddr.ip_address);
5801 break;
5802 case ISCSI_BOOT_TGT_PORT:
5803 rc = sprintf(str, "%d\n", boot_conn->dest_port);
5804 break;
5805 case ISCSI_BOOT_TGT_CHAP_NAME:
5806 rc = sprintf(str, "%.*s\n",
5807 boot_conn->chap.target_chap_name_length,
5808 (char *)&boot_conn->chap.target_chap_name);
5809 break;
5810 case ISCSI_BOOT_TGT_CHAP_SECRET:
5811 rc = sprintf(str, "%.*s\n",
5812 boot_conn->chap.target_secret_length,
5813 (char *)&boot_conn->chap.target_secret);
5814 break;
5815 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5816 rc = sprintf(str, "%.*s\n",
5817 boot_conn->chap.intr_chap_name_length,
5818 (char *)&boot_conn->chap.intr_chap_name);
5819 break;
5820 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5821 rc = sprintf(str, "%.*s\n",
5822 boot_conn->chap.intr_secret_length,
5823 (char *)&boot_conn->chap.intr_secret);
5824 break;
5825 case ISCSI_BOOT_TGT_FLAGS:
5826 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5827 break;
5828 case ISCSI_BOOT_TGT_NIC_ASSOC:
5829 rc = sprintf(str, "0\n");
5830 break;
5831 default:
5832 rc = -ENOSYS;
5833 break;
5835 return rc;
5838 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
5840 struct scsi_qla_host *ha = data;
5841 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
5843 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5846 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
5848 struct scsi_qla_host *ha = data;
5849 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
5851 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5854 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
5856 int rc;
5858 switch (type) {
5859 case ISCSI_BOOT_TGT_NAME:
5860 case ISCSI_BOOT_TGT_IP_ADDR:
5861 case ISCSI_BOOT_TGT_PORT:
5862 case ISCSI_BOOT_TGT_CHAP_NAME:
5863 case ISCSI_BOOT_TGT_CHAP_SECRET:
5864 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5865 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5866 case ISCSI_BOOT_TGT_NIC_ASSOC:
5867 case ISCSI_BOOT_TGT_FLAGS:
5868 rc = S_IRUGO;
5869 break;
5870 default:
5871 rc = 0;
5872 break;
5874 return rc;
5877 static void qla4xxx_boot_release(void *data)
5879 struct scsi_qla_host *ha = data;
5881 scsi_host_put(ha->host);
5884 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
5886 dma_addr_t buf_dma;
5887 uint32_t addr, pri_addr, sec_addr;
5888 uint32_t offset;
5889 uint16_t func_num;
5890 uint8_t val;
5891 uint8_t *buf = NULL;
5892 size_t size = 13 * sizeof(uint8_t);
5893 int ret = QLA_SUCCESS;
5895 func_num = PCI_FUNC(ha->pdev->devfn);
5897 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
5898 __func__, ha->pdev->device, func_num);
5900 if (is_qla40XX(ha)) {
5901 if (func_num == 1) {
5902 addr = NVRAM_PORT0_BOOT_MODE;
5903 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
5904 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
5905 } else if (func_num == 3) {
5906 addr = NVRAM_PORT1_BOOT_MODE;
5907 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
5908 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
5909 } else {
5910 ret = QLA_ERROR;
5911 goto exit_boot_info;
5914 /* Check Boot Mode */
5915 val = rd_nvram_byte(ha, addr);
5916 if (!(val & 0x07)) {
5917 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
5918 "options : 0x%x\n", __func__, val));
5919 ret = QLA_ERROR;
5920 goto exit_boot_info;
5923 /* get primary valid target index */
5924 val = rd_nvram_byte(ha, pri_addr);
5925 if (val & BIT_7)
5926 ddb_index[0] = (val & 0x7f);
5928 /* get secondary valid target index */
5929 val = rd_nvram_byte(ha, sec_addr);
5930 if (val & BIT_7)
5931 ddb_index[1] = (val & 0x7f);
5932 goto exit_boot_info;
5933 } else if (is_qla80XX(ha)) {
5934 buf = dma_alloc_coherent(&ha->pdev->dev, size,
5935 &buf_dma, GFP_KERNEL);
5936 if (!buf) {
5937 DEBUG2(ql4_printk(KERN_ERR, ha,
5938 "%s: Unable to allocate dma buffer\n",
5939 __func__));
5940 ret = QLA_ERROR;
5941 goto exit_boot_info;
5944 if (ha->port_num == 0)
5945 offset = BOOT_PARAM_OFFSET_PORT0;
5946 else if (ha->port_num == 1)
5947 offset = BOOT_PARAM_OFFSET_PORT1;
5948 else {
5949 ret = QLA_ERROR;
5950 goto exit_boot_info_free;
5952 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
5953 offset;
5954 if (qla4xxx_get_flash(ha, buf_dma, addr,
5955 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
5956 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
5957 " failed\n", ha->host_no, __func__));
5958 ret = QLA_ERROR;
5959 goto exit_boot_info_free;
5961 /* Check Boot Mode */
5962 if (!(buf[1] & 0x07)) {
5963 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
5964 " : 0x%x\n", buf[1]));
5965 ret = QLA_ERROR;
5966 goto exit_boot_info_free;
5969 /* get primary valid target index */
5970 if (buf[2] & BIT_7)
5971 ddb_index[0] = buf[2] & 0x7f;
5973 /* get secondary valid target index */
5974 if (buf[11] & BIT_7)
5975 ddb_index[1] = buf[11] & 0x7f;
5976 } else {
5977 ret = QLA_ERROR;
5978 goto exit_boot_info;
5981 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
5982 " target ID %d\n", __func__, ddb_index[0],
5983 ddb_index[1]));
5985 exit_boot_info_free:
5986 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
5987 exit_boot_info:
5988 ha->pri_ddb_idx = ddb_index[0];
5989 ha->sec_ddb_idx = ddb_index[1];
5990 return ret;
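/*
 * Layout of the 13-byte boot-parameter block read from flash above, as
 * consumed by this function: buf[1] holds the boot-mode options (zero means
 * boot disabled), buf[2] the primary boot target index and buf[11] the
 * secondary boot target index, each valid only when BIT_7 is set and masked
 * with 0x7f to obtain the DDB index.
 */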
5994 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
5995 * @ha: pointer to adapter structure
5996 * @username: CHAP username to be returned
5997 * @password: CHAP password to be returned
5999 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
6000 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
6001 * So from the CHAP cache find the first BIDI CHAP entry and set it
6002 * to the boot record in sysfs.
6004 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
6005 char *password)
6007 int i, ret = -EINVAL;
6008 int max_chap_entries = 0;
6009 struct ql4_chap_table *chap_table;
6011 if (is_qla80XX(ha))
6012 max_chap_entries = (ha->hw.flt_chap_size / 2) /
6013 sizeof(struct ql4_chap_table);
6014 else
6015 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
6017 if (!ha->chap_list) {
6018 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
6019 return ret;
6022 mutex_lock(&ha->chap_sem);
6023 for (i = 0; i < max_chap_entries; i++) {
6024 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
6025 if (chap_table->cookie !=
6026 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
6027 continue;
6030 if (chap_table->flags & BIT_7) /* local */
6031 continue;
6033 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
6034 continue;
6036 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
6037 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
6038 ret = 0;
6039 break;
6041 mutex_unlock(&ha->chap_sem);
6043 return ret;
6047 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
6048 struct ql4_boot_session_info *boot_sess,
6049 uint16_t ddb_index)
6051 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
6052 struct dev_db_entry *fw_ddb_entry;
6053 dma_addr_t fw_ddb_entry_dma;
6054 uint16_t idx;
6055 uint16_t options;
6056 int ret = QLA_SUCCESS;
6058 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6059 &fw_ddb_entry_dma, GFP_KERNEL);
6060 if (!fw_ddb_entry) {
6061 DEBUG2(ql4_printk(KERN_ERR, ha,
6062 "%s: Unable to allocate dma buffer.\n",
6063 __func__));
6064 ret = QLA_ERROR;
6065 return ret;
6068 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
6069 fw_ddb_entry_dma, ddb_index)) {
6070 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
6071 "index [%d]\n", __func__, ddb_index));
6072 ret = QLA_ERROR;
6073 goto exit_boot_target;
6076 /* Update target name and IP from DDB */
6077 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
6078 min(sizeof(boot_sess->target_name),
6079 sizeof(fw_ddb_entry->iscsi_name)));
6081 options = le16_to_cpu(fw_ddb_entry->options);
6082 if (options & DDB_OPT_IPV6_DEVICE) {
6083 memcpy(&boot_conn->dest_ipaddr.ip_address,
6084 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
6085 } else {
6086 boot_conn->dest_ipaddr.ip_type = 0x1;
6087 memcpy(&boot_conn->dest_ipaddr.ip_address,
6088 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
6091 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
6093 /* update chap information */
6094 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
6096 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6098 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
6100 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
6101 target_chap_name,
6102 (char *)&boot_conn->chap.target_secret,
6103 idx);
6104 if (ret) {
6105 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
6106 ret = QLA_ERROR;
6107 goto exit_boot_target;
6110 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6111 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6114 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6116 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
6118 ret = qla4xxx_get_bidi_chap(ha,
6119 (char *)&boot_conn->chap.intr_chap_name,
6120 (char *)&boot_conn->chap.intr_secret);
6122 if (ret) {
6123 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
6124 ret = QLA_ERROR;
6125 goto exit_boot_target;
6128 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6129 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6132 exit_boot_target:
6133 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6134 fw_ddb_entry, fw_ddb_entry_dma);
6135 return ret;
6138 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
6140 uint16_t ddb_index[2];
6141 int ret = QLA_ERROR;
6142 int rval;
6144 memset(ddb_index, 0, sizeof(ddb_index));
6145 ddb_index[0] = 0xffff;
6146 ddb_index[1] = 0xffff;
6147 ret = get_fw_boot_info(ha, ddb_index);
6148 if (ret != QLA_SUCCESS) {
6149 DEBUG2(ql4_printk(KERN_INFO, ha,
6150 "%s: No boot target configured.\n", __func__));
6151 return ret;
6154 if (ql4xdisablesysfsboot)
6155 return QLA_SUCCESS;
6157 if (ddb_index[0] == 0xffff)
6158 goto sec_target;
6160 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
6161 ddb_index[0]);
6162 if (rval != QLA_SUCCESS) {
6163 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
6164 "configured\n", __func__));
6165 } else
6166 ret = QLA_SUCCESS;
6168 sec_target:
6169 if (ddb_index[1] == 0xffff)
6170 goto exit_get_boot_info;
6172 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
6173 ddb_index[1]);
6174 if (rval != QLA_SUCCESS) {
6175 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
6176 " configured\n", __func__));
6177 } else
6178 ret = QLA_SUCCESS;
6180 exit_get_boot_info:
6181 return ret;
6184 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
6186 struct iscsi_boot_kobj *boot_kobj;
6188 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
6189 return QLA_ERROR;
6191 if (ql4xdisablesysfsboot) {
6192 ql4_printk(KERN_INFO, ha,
6193 "%s: syfsboot disabled - driver will trigger login "
6194 "and publish session for discovery .\n", __func__);
6195 return QLA_SUCCESS;
6199 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
6200 if (!ha->boot_kset)
6201 goto kset_free;
6203 if (!scsi_host_get(ha->host))
6204 goto kset_free;
6205 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
6206 qla4xxx_show_boot_tgt_pri_info,
6207 qla4xxx_tgt_get_attr_visibility,
6208 qla4xxx_boot_release);
6209 if (!boot_kobj)
6210 goto put_host;
6212 if (!scsi_host_get(ha->host))
6213 goto kset_free;
6214 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
6215 qla4xxx_show_boot_tgt_sec_info,
6216 qla4xxx_tgt_get_attr_visibility,
6217 qla4xxx_boot_release);
6218 if (!boot_kobj)
6219 goto put_host;
6221 if (!scsi_host_get(ha->host))
6222 goto kset_free;
6223 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
6224 qla4xxx_show_boot_ini_info,
6225 qla4xxx_ini_get_attr_visibility,
6226 qla4xxx_boot_release);
6227 if (!boot_kobj)
6228 goto put_host;
6230 if (!scsi_host_get(ha->host))
6231 goto kset_free;
6232 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
6233 qla4xxx_show_boot_eth_info,
6234 qla4xxx_eth_get_attr_visibility,
6235 qla4xxx_boot_release);
6236 if (!boot_kobj)
6237 goto put_host;
6239 return QLA_SUCCESS;
6241 put_host:
6242 scsi_host_put(ha->host);
6243 kset_free:
6244 iscsi_boot_destroy_kset(ha->boot_kset);
6245 return -ENOMEM;
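/*
 * Editor's note: assuming the iscsi_boot_sysfs naming conventions, the kset
 * created above is expected to show up as /sys/firmware/iscsi_boot<host_no>/
 * with "target0" (primary), "target1" (secondary), "initiator" and
 * "ethernet0" sub-directories backed by the show/visibility callbacks
 * registered here.
 */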
6249 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
6250 struct ql4_tuple_ddb *tddb)
6252 struct scsi_qla_host *ha;
6253 struct iscsi_cls_session *cls_sess;
6254 struct iscsi_cls_conn *cls_conn;
6255 struct iscsi_session *sess;
6256 struct iscsi_conn *conn;
6258 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
6259 ha = ddb_entry->ha;
6260 cls_sess = ddb_entry->sess;
6261 sess = cls_sess->dd_data;
6262 cls_conn = ddb_entry->conn;
6263 conn = cls_conn->dd_data;
6265 tddb->tpgt = sess->tpgt;
6266 tddb->port = conn->persistent_port;
6267 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
6268 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
6271 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
6272 struct ql4_tuple_ddb *tddb,
6273 uint8_t *flash_isid)
6275 uint16_t options = 0;
6277 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
6278 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
6279 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
6281 options = le16_to_cpu(fw_ddb_entry->options);
6282 if (options & DDB_OPT_IPV6_DEVICE)
6283 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
6284 else
6285 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
6287 tddb->port = le16_to_cpu(fw_ddb_entry->port);
6289 if (flash_isid == NULL)
6290 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
6291 sizeof(tddb->isid));
6292 else
6293 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
6296 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
6297 struct ql4_tuple_ddb *old_tddb,
6298 struct ql4_tuple_ddb *new_tddb,
6299 uint8_t is_isid_compare)
6301 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6302 return QLA_ERROR;
6304 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
6305 return QLA_ERROR;
6307 if (old_tddb->port != new_tddb->port)
6308 return QLA_ERROR;
6310 /* For multiple sessions, the driver generates the ISID, so do not compare
6311 * ISIDs in the reset path since that would compare a driver-generated
6312 * ISID with a firmware-generated one. This could lead to adding
6313 * duplicate DDBs to the list, as the driver-generated ISID would not
6314 * match the firmware-generated ISID.
6316 if (is_isid_compare) {
6317 DEBUG2(ql4_printk(KERN_INFO, ha,
6318 "%s: old ISID [%pmR] New ISID [%pmR]\n",
6319 __func__, old_tddb->isid, new_tddb->isid));
6321 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6322 sizeof(old_tddb->isid)))
6323 return QLA_ERROR;
6326 DEBUG2(ql4_printk(KERN_INFO, ha,
6327 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
6328 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
6329 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
6330 new_tddb->ip_addr, new_tddb->iscsi_name));
6332 return QLA_SUCCESS;
6335 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
6336 struct dev_db_entry *fw_ddb_entry,
6337 uint32_t *index)
6339 struct ddb_entry *ddb_entry;
6340 struct ql4_tuple_ddb *fw_tddb = NULL;
6341 struct ql4_tuple_ddb *tmp_tddb = NULL;
6342 int idx;
6343 int ret = QLA_ERROR;
6345 fw_tddb = vzalloc(sizeof(*fw_tddb));
6346 if (!fw_tddb) {
6347 DEBUG2(ql4_printk(KERN_WARNING, ha,
6348 "Memory Allocation failed.\n"));
6349 ret = QLA_SUCCESS;
6350 goto exit_check;
6353 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6354 if (!tmp_tddb) {
6355 DEBUG2(ql4_printk(KERN_WARNING, ha,
6356 "Memory Allocation failed.\n"));
6357 ret = QLA_SUCCESS;
6358 goto exit_check;
6361 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6363 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6364 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6365 if (ddb_entry == NULL)
6366 continue;
6368 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
6369 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
6370 ret = QLA_SUCCESS; /* found */
6371 if (index != NULL)
6372 *index = idx;
6373 goto exit_check;
6377 exit_check:
6378 if (fw_tddb)
6379 vfree(fw_tddb);
6380 if (tmp_tddb)
6381 vfree(tmp_tddb);
6382 return ret;
6386 * qla4xxx_check_existing_isid - check if a target with the same isid
6387 * exists in the target list
6388 * @list_nt: list of targets
6389 * @isid: isid to check
6391 * This routine returns QLA_SUCCESS if a target with the same isid exists
6393 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
6395 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6396 struct dev_db_entry *fw_ddb_entry;
6398 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6399 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
6401 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
6402 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
6403 return QLA_SUCCESS;
6406 return QLA_ERROR;
6410 * qla4xxx_update_isid - compare ddbs and update the isid
6411 * @ha: Pointer to host adapter structure.
6412 * @list_nt: list of nt targets
6413 * @fw_ddb_entry: firmware ddb entry
6415 * This routine updates the isid if ddbs have the same iqn, same isid and
6416 * a different IP addr.
6417 * Returns QLA_SUCCESS if the isid is updated.
6419 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
6420 struct list_head *list_nt,
6421 struct dev_db_entry *fw_ddb_entry)
6423 uint8_t base_value, i;
6425 base_value = fw_ddb_entry->isid[1] & 0x1f;
6426 for (i = 0; i < 8; i++) {
6427 fw_ddb_entry->isid[1] = (base_value | (i << 5));
6428 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6429 break;
6432 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6433 return QLA_ERROR;
6435 return QLA_SUCCESS;
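/*
 * Worked example for the loop above: the low five bits of isid[1] are kept
 * as base_value and the top three bits are cycled through the eight values
 * i << 5. For isid[1] == 0x23, base_value is 0x03 and the candidates tried
 * are 0x03, 0x23, 0x43, 0x63, 0x83, 0xa3, 0xc3 and 0xe3; the first one not
 * already present in list_nt is kept, otherwise QLA_ERROR is returned.
 */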
6439 * qla4xxx_should_update_isid - check if isid need to update
6440 * @ha: Pointer to host adapter structure.
6441 * @old_tddb: ddb tuple
6442 * @new_tddb: ddb tuple
6444 * Returns QLA_SUCCESS if the IP address or port differs while the iqn
6445 * and isid are the same
6447 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
6448 struct ql4_tuple_ddb *old_tddb,
6449 struct ql4_tuple_ddb *new_tddb)
6451 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
6452 /* Same ip */
6453 if (old_tddb->port == new_tddb->port)
6454 return QLA_ERROR;
6457 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6458 /* different iqn */
6459 return QLA_ERROR;
6461 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6462 sizeof(old_tddb->isid)))
6463 /* different isid */
6464 return QLA_ERROR;
6466 return QLA_SUCCESS;
6470 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
6471 * @ha: Pointer to host adapter structure.
6472 * @list_nt: list of nt targets.
6473 * @fw_ddb_entry: firmware ddb entry.
6475 * This routine checks if fw_ddb_entry already exists in list_nt to avoid
6476 * adding a duplicate ddb to list_nt.
6477 * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
6478 * Note: This function also updates the isid of the DDB if required.
6481 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
6482 struct list_head *list_nt,
6483 struct dev_db_entry *fw_ddb_entry)
6485 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6486 struct ql4_tuple_ddb *fw_tddb = NULL;
6487 struct ql4_tuple_ddb *tmp_tddb = NULL;
6488 int rval, ret = QLA_ERROR;
6490 fw_tddb = vzalloc(sizeof(*fw_tddb));
6491 if (!fw_tddb) {
6492 DEBUG2(ql4_printk(KERN_WARNING, ha,
6493 "Memory Allocation failed.\n"));
6494 ret = QLA_SUCCESS;
6495 goto exit_check;
6498 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6499 if (!tmp_tddb) {
6500 DEBUG2(ql4_printk(KERN_WARNING, ha,
6501 "Memory Allocation failed.\n"));
6502 ret = QLA_SUCCESS;
6503 goto exit_check;
6506 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6508 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6509 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
6510 nt_ddb_idx->flash_isid);
6511 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
6512 /* found duplicate ddb */
6513 if (ret == QLA_SUCCESS)
6514 goto exit_check;
6517 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6518 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
6520 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
6521 if (ret == QLA_SUCCESS) {
6522 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
6523 if (rval == QLA_SUCCESS)
6524 ret = QLA_ERROR;
6525 else
6526 ret = QLA_SUCCESS;
6528 goto exit_check;
6532 exit_check:
6533 if (fw_tddb)
6534 vfree(fw_tddb);
6535 if (tmp_tddb)
6536 vfree(tmp_tddb);
6537 return ret;
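/*
 * Summary of the two passes above: the first list walk compares against the
 * ISID originally read from flash to catch exact duplicates; the second walk
 * looks for entries with the same iqn and isid but a different portal and,
 * when found, regenerates the ISID via qla4xxx_update_isid() so the new DDB
 * can be added alongside the existing one.
 */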
6540 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
6542 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6544 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6545 list_del_init(&ddb_idx->list);
6546 vfree(ddb_idx);
6550 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
6551 struct dev_db_entry *fw_ddb_entry)
6553 struct iscsi_endpoint *ep;
6554 struct sockaddr_in *addr;
6555 struct sockaddr_in6 *addr6;
6556 struct sockaddr *t_addr;
6557 struct sockaddr_storage *dst_addr;
6558 char *ip;
6560 /* TODO: the iscsi_endpoint needs to be destroyed on unload */
6561 dst_addr = vmalloc(sizeof(*dst_addr));
6562 if (!dst_addr)
6563 return NULL;
6565 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
6566 t_addr = (struct sockaddr *)dst_addr;
6567 t_addr->sa_family = AF_INET6;
6568 addr6 = (struct sockaddr_in6 *)dst_addr;
6569 ip = (char *)&addr6->sin6_addr;
6570 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
6571 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
6573 } else {
6574 t_addr = (struct sockaddr *)dst_addr;
6575 t_addr->sa_family = AF_INET;
6576 addr = (struct sockaddr_in *)dst_addr;
6577 ip = (char *)&addr->sin_addr;
6578 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
6579 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
6582 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
6583 vfree(dst_addr);
6584 return ep;
6587 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
6589 if (ql4xdisablesysfsboot)
6590 return QLA_SUCCESS;
6591 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
6592 return QLA_ERROR;
6593 return QLA_SUCCESS;
6596 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
6597 struct ddb_entry *ddb_entry,
6598 uint16_t idx)
6600 uint16_t def_timeout;
6602 ddb_entry->ddb_type = FLASH_DDB;
6603 ddb_entry->fw_ddb_index = INVALID_ENTRY;
6604 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
6605 ddb_entry->ha = ha;
6606 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
6607 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
6608 ddb_entry->chap_tbl_idx = INVALID_ENTRY;
6610 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6611 atomic_set(&ddb_entry->relogin_timer, 0);
6612 atomic_set(&ddb_entry->relogin_retry_count, 0);
6613 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
6614 ddb_entry->default_relogin_timeout =
6615 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
6616 def_timeout : LOGIN_TOV;
6617 ddb_entry->default_time2wait =
6618 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
6620 if (ql4xdisablesysfsboot &&
6621 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
6622 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
6625 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
6627 uint32_t idx = 0;
6628 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
6629 uint32_t sts[MBOX_REG_COUNT];
6630 uint32_t ip_state;
6631 unsigned long wtime;
6632 int ret;
6634 wtime = jiffies + (HZ * IP_CONFIG_TOV);
6635 do {
6636 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
6637 if (ip_idx[idx] == -1)
6638 continue;
6640 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
6642 if (ret == QLA_ERROR) {
6643 ip_idx[idx] = -1;
6644 continue;
6647 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
6649 DEBUG2(ql4_printk(KERN_INFO, ha,
6650 "Waiting for IP state for idx = %d, state = 0x%x\n",
6651 ip_idx[idx], ip_state));
6652 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
6653 ip_state == IP_ADDRSTATE_INVALID ||
6654 ip_state == IP_ADDRSTATE_PREFERRED ||
6655 ip_state == IP_ADDRSTATE_DEPRICATED ||
6656 ip_state == IP_ADDRSTATE_DISABLING)
6657 ip_idx[idx] = -1;
6660 /* Break if all IP states checked */
6661 if ((ip_idx[0] == -1) &&
6662 (ip_idx[1] == -1) &&
6663 (ip_idx[2] == -1) &&
6664 (ip_idx[3] == -1))
6665 break;
6666 schedule_timeout_uninterruptible(HZ);
6667 } while (time_after(wtime, jiffies));
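/*
 * The loop above polls each of the four IP interfaces once a second for up
 * to IP_CONFIG_TOV seconds, dropping an interface from the poll set (index
 * set to -1) as soon as its address state settles or the firmware query
 * fails, and returns early once all four are resolved.
 */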
6670 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
6671 struct dev_db_entry *flash_ddb_entry)
6673 uint16_t options = 0;
6674 size_t ip_len = IP_ADDR_LEN;
6676 options = le16_to_cpu(fw_ddb_entry->options);
6677 if (options & DDB_OPT_IPV6_DEVICE)
6678 ip_len = IPv6_ADDR_LEN;
6680 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
6681 return QLA_ERROR;
6683 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
6684 sizeof(fw_ddb_entry->isid)))
6685 return QLA_ERROR;
6687 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
6688 sizeof(fw_ddb_entry->port)))
6689 return QLA_ERROR;
6691 return QLA_SUCCESS;
6694 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
6695 struct dev_db_entry *fw_ddb_entry,
6696 uint32_t fw_idx, uint32_t *flash_index)
6698 struct dev_db_entry *flash_ddb_entry;
6699 dma_addr_t flash_ddb_entry_dma;
6700 uint32_t idx = 0;
6701 int max_ddbs;
6702 int ret = QLA_ERROR, status;
6704 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6705 MAX_DEV_DB_ENTRIES;
6707 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6708 &flash_ddb_entry_dma);
6709 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
6710 ql4_printk(KERN_ERR, ha, "Out of memory\n");
6711 goto exit_find_st_idx;
6714 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6715 flash_ddb_entry_dma, fw_idx);
6716 if (status == QLA_SUCCESS) {
6717 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6718 if (status == QLA_SUCCESS) {
6719 *flash_index = fw_idx;
6720 ret = QLA_SUCCESS;
6721 goto exit_find_st_idx;
6725 for (idx = 0; idx < max_ddbs; idx++) {
6726 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6727 flash_ddb_entry_dma, idx);
6728 if (status == QLA_ERROR)
6729 continue;
6731 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6732 if (status == QLA_SUCCESS) {
6733 *flash_index = idx;
6734 ret = QLA_SUCCESS;
6735 goto exit_find_st_idx;
6739 if (idx == max_ddbs)
6740 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
6741 fw_idx);
6743 exit_find_st_idx:
6744 if (flash_ddb_entry)
6745 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
6746 flash_ddb_entry_dma);
6748 return ret;
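/*
 * Lookup strategy above: first try the flash entry at the same index as the
 * firmware DDB (the common case), and only if that does not match fall back
 * to scanning every flash DDB for one with the same IP address, ISID and
 * port.
 */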
6751 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
6752 struct list_head *list_st)
6754 struct qla_ddb_index *st_ddb_idx;
6755 int max_ddbs;
6756 int fw_idx_size;
6757 struct dev_db_entry *fw_ddb_entry;
6758 dma_addr_t fw_ddb_dma;
6759 int ret;
6760 uint32_t idx = 0, next_idx = 0;
6761 uint32_t state = 0, conn_err = 0;
6762 uint32_t flash_index = -1;
6763 uint16_t conn_id = 0;
6765 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6766 &fw_ddb_dma);
6767 if (fw_ddb_entry == NULL) {
6768 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6769 goto exit_st_list;
6772 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6773 MAX_DEV_DB_ENTRIES;
6774 fw_idx_size = sizeof(struct qla_ddb_index);
6776 for (idx = 0; idx < max_ddbs; idx = next_idx) {
6777 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
6778 NULL, &next_idx, &state,
6779 &conn_err, NULL, &conn_id);
6780 if (ret == QLA_ERROR)
6781 break;
6783 /* Ignore DDB if invalid state (unassigned) */
6784 if (state == DDB_DS_UNASSIGNED)
6785 goto continue_next_st;
6787 /* Check if ST, add to the list_st */
6788 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
6789 goto continue_next_st;
6791 st_ddb_idx = vzalloc(fw_idx_size);
6792 if (!st_ddb_idx)
6793 break;
6795 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
6796 &flash_index);
6797 if (ret == QLA_ERROR) {
6798 ql4_printk(KERN_ERR, ha,
6799 "No flash entry for ST at idx [%d]\n", idx);
6800 st_ddb_idx->flash_ddb_idx = idx;
6801 } else {
6802 ql4_printk(KERN_INFO, ha,
6803 "ST at idx [%d] is stored at flash [%d]\n",
6804 idx, flash_index);
6805 st_ddb_idx->flash_ddb_idx = flash_index;
6808 st_ddb_idx->fw_ddb_idx = idx;
6810 list_add_tail(&st_ddb_idx->list, list_st);
6811 continue_next_st:
6812 if (next_idx == 0)
6813 break;
6816 exit_st_list:
6817 if (fw_ddb_entry)
6818 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
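/*
 * A DDB whose iscsi_name is empty is a send-targets (ST) discovery entry and
 * is collected here; entries with a target name (NT) are handled later by
 * qla4xxx_build_nt_list().
 */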
6822 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
6823 * @ha: pointer to adapter structure
6824 * @list_ddb: List from which failed ddb to be removed
6826 * Iterate over the list of DDBs and remove those that are either in the
6827 * no-connection-active state or the failed state
6829 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
6830 struct list_head *list_ddb)
6832 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6833 uint32_t next_idx = 0;
6834 uint32_t state = 0, conn_err = 0;
6835 int ret;
6837 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6838 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
6839 NULL, 0, NULL, &next_idx, &state,
6840 &conn_err, NULL, NULL);
6841 if (ret == QLA_ERROR)
6842 continue;
6844 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6845 state == DDB_DS_SESSION_FAILED) {
6846 list_del_init(&ddb_idx->list);
6847 vfree(ddb_idx);
6852 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
6853 struct ddb_entry *ddb_entry,
6854 struct dev_db_entry *fw_ddb_entry)
6856 struct iscsi_cls_session *cls_sess;
6857 struct iscsi_session *sess;
6858 uint32_t max_ddbs = 0;
6859 uint16_t ddb_link = -1;
6861 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6862 MAX_DEV_DB_ENTRIES;
6864 cls_sess = ddb_entry->sess;
6865 sess = cls_sess->dd_data;
6867 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6868 if (ddb_link < max_ddbs)
6869 sess->discovery_parent_idx = ddb_link;
6870 else
6871 sess->discovery_parent_idx = DDB_NO_LINK;
6874 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
6875 struct dev_db_entry *fw_ddb_entry,
6876 int is_reset, uint16_t idx)
6878 struct iscsi_cls_session *cls_sess;
6879 struct iscsi_session *sess;
6880 struct iscsi_cls_conn *cls_conn;
6881 struct iscsi_endpoint *ep;
6882 uint16_t cmds_max = 32;
6883 uint16_t conn_id = 0;
6884 uint32_t initial_cmdsn = 0;
6885 int ret = QLA_SUCCESS;
6887 struct ddb_entry *ddb_entry = NULL;
6889 /* Create session object with INVALID_ENTRY;
6890 * the target_id will get set when we issue the login
6892 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
6893 cmds_max, sizeof(struct ddb_entry),
6894 sizeof(struct ql4_task_data),
6895 initial_cmdsn, INVALID_ENTRY);
6896 if (!cls_sess) {
6897 ret = QLA_ERROR;
6898 goto exit_setup;
6902 * iscsi_session_setup takes a reference on the driver module, so call
6903 * module_put() here to drop that reference count.
6905 module_put(qla4xxx_iscsi_transport.owner);
6906 sess = cls_sess->dd_data;
6907 ddb_entry = sess->dd_data;
6908 ddb_entry->sess = cls_sess;
6910 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
6911 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
6912 sizeof(struct dev_db_entry));
6914 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
6916 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
6918 if (!cls_conn) {
6919 ret = QLA_ERROR;
6920 goto exit_setup;
6923 ddb_entry->conn = cls_conn;
6925 /* Setup ep, for displaying attributes in sysfs */
6926 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
6927 if (ep) {
6928 ep->conn = cls_conn;
6929 cls_conn->ep = ep;
6930 } else {
6931 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
6932 ret = QLA_ERROR;
6933 goto exit_setup;
6936 /* Update sess/conn params */
6937 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
6938 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
6940 if (is_reset == RESET_ADAPTER) {
6941 iscsi_block_session(cls_sess);
6942 /* Use the relogin path to discover new devices
6943 * by short-circuiting the logic of setting a
6944 * timer to relogin - instead set the flags
6945 * to initiate login right away.
6947 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
6948 set_bit(DF_RELOGIN, &ddb_entry->flags);
6951 exit_setup:
6952 return ret;
6955 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
6956 struct list_head *list_ddb,
6957 struct dev_db_entry *fw_ddb_entry)
6959 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6960 uint16_t ddb_link;
6962 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6964 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6965 if (ddb_idx->fw_ddb_idx == ddb_link) {
6966 DEBUG2(ql4_printk(KERN_INFO, ha,
6967 "Updating NT parent idx from [%d] to [%d]\n",
6968 ddb_link, ddb_idx->flash_ddb_idx));
6969 fw_ddb_entry->ddb_link =
6970 cpu_to_le16(ddb_idx->flash_ddb_idx);
6971 return;
6976 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
6977 struct list_head *list_nt,
6978 struct list_head *list_st,
6979 int is_reset)
6981 struct dev_db_entry *fw_ddb_entry;
6982 struct ddb_entry *ddb_entry = NULL;
6983 dma_addr_t fw_ddb_dma;
6984 int max_ddbs;
6985 int fw_idx_size;
6986 int ret;
6987 uint32_t idx = 0, next_idx = 0;
6988 uint32_t state = 0, conn_err = 0;
6989 uint32_t ddb_idx = -1;
6990 uint16_t conn_id = 0;
6991 uint16_t ddb_link = -1;
6992 struct qla_ddb_index *nt_ddb_idx;
6994 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6995 &fw_ddb_dma);
6996 if (fw_ddb_entry == NULL) {
6997 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6998 goto exit_nt_list;
7000 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7001 MAX_DEV_DB_ENTRIES;
7002 fw_idx_size = sizeof(struct qla_ddb_index);
7004 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7005 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7006 NULL, &next_idx, &state,
7007 &conn_err, NULL, &conn_id);
7008 if (ret == QLA_ERROR)
7009 break;
7011 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
7012 goto continue_next_nt;
7014 /* Check if NT, then add it to the list */
7015 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
7016 goto continue_next_nt;
7018 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
7019 if (ddb_link < max_ddbs)
7020 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
7022 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
7023 state == DDB_DS_SESSION_FAILED) &&
7024 (is_reset == INIT_ADAPTER))
7025 goto continue_next_nt;
7027 DEBUG2(ql4_printk(KERN_INFO, ha,
7028 "Adding DDB to session = 0x%x\n", idx));
7030 if (is_reset == INIT_ADAPTER) {
7031 nt_ddb_idx = vmalloc(fw_idx_size);
7032 if (!nt_ddb_idx)
7033 break;
7035 nt_ddb_idx->fw_ddb_idx = idx;
7037 /* Copy original isid as it may get updated in function
7038 * qla4xxx_update_isid(). We need original isid in
7039 * function qla4xxx_compare_tuple_ddb to find duplicate
7040 * target */
7041 memcpy(&nt_ddb_idx->flash_isid[0],
7042 &fw_ddb_entry->isid[0],
7043 sizeof(nt_ddb_idx->flash_isid));
7045 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
7046 fw_ddb_entry);
7047 if (ret == QLA_SUCCESS) {
7048 /* free nt_ddb_idx and do not add to list_nt */
7049 vfree(nt_ddb_idx);
7050 goto continue_next_nt;
7053 /* Copy updated isid */
7054 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
7055 sizeof(struct dev_db_entry));
7057 list_add_tail(&nt_ddb_idx->list, list_nt);
7058 } else if (is_reset == RESET_ADAPTER) {
7059 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
7060 &ddb_idx);
7061 if (ret == QLA_SUCCESS) {
7062 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
7063 ddb_idx);
7064 if (ddb_entry != NULL)
7065 qla4xxx_update_sess_disc_idx(ha,
7066 ddb_entry,
7067 fw_ddb_entry);
7068 goto continue_next_nt;
7072 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
7073 if (ret == QLA_ERROR)
7074 goto exit_nt_list;
7076 continue_next_nt:
7077 if (next_idx == 0)
7078 break;
7081 exit_nt_list:
7082 if (fw_ddb_entry)
7083 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7086 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
7087 struct list_head *list_nt,
7088 uint16_t target_id)
7090 struct dev_db_entry *fw_ddb_entry;
7091 dma_addr_t fw_ddb_dma;
7092 int max_ddbs;
7093 int fw_idx_size;
7094 int ret;
7095 uint32_t idx = 0, next_idx = 0;
7096 uint32_t state = 0, conn_err = 0;
7097 uint16_t conn_id = 0;
7098 struct qla_ddb_index *nt_ddb_idx;
7100 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7101 &fw_ddb_dma);
7102 if (fw_ddb_entry == NULL) {
7103 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
7104 goto exit_new_nt_list;
7106 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7107 MAX_DEV_DB_ENTRIES;
7108 fw_idx_size = sizeof(struct qla_ddb_index);
7110 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7111 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7112 NULL, &next_idx, &state,
7113 &conn_err, NULL, &conn_id);
7114 if (ret == QLA_ERROR)
7115 break;
7117 /* Check if NT, then add it to list */
7118 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7119 goto continue_next_new_nt;
7121 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
7122 goto continue_next_new_nt;
7124 DEBUG2(ql4_printk(KERN_INFO, ha,
7125 "Adding DDB to session = 0x%x\n", idx));
7127 nt_ddb_idx = vmalloc(fw_idx_size);
7128 if (!nt_ddb_idx)
7129 break;
7131 nt_ddb_idx->fw_ddb_idx = idx;
7133 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7134 if (ret == QLA_SUCCESS) {
7135 /* free nt_ddb_idx and do not add to list_nt */
7136 vfree(nt_ddb_idx);
7137 goto continue_next_new_nt;
7140 if (target_id < max_ddbs)
7141 fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
7143 list_add_tail(&nt_ddb_idx->list, list_nt);
7145 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7146 idx);
7147 if (ret == QLA_ERROR)
7148 goto exit_new_nt_list;
7150 continue_next_new_nt:
7151 if (next_idx == 0)
7152 break;
7155 exit_new_nt_list:
7156 if (fw_ddb_entry)
7157 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7161 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
7162 * @dev: dev associated with the sysfs entry
7163 * @data: pointer to flashnode session object
7165 * Returns:
7166 * 1: if flashnode entry is non-persistent
7167 * 0: if flashnode entry is persistent
7169 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
7171 struct iscsi_bus_flash_session *fnode_sess;
7173 if (!iscsi_flashnode_bus_match(dev, NULL))
7174 return 0;
7176 fnode_sess = iscsi_dev_to_flash_session(dev);
7178 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
7182 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
7183 * @ha: pointer to host
7184 * @fw_ddb_entry: flash ddb data
7185 * @idx: target index
7186 * @user: if set then this call is made from userland else from kernel
7188 * Returns:
7189 * On success: QLA_SUCCESS
7190 * On failure: QLA_ERROR
7192 * This creates separate sysfs entries for the session and connection
7193 * attributes of the given fw ddb entry.
7194 * If this is invoked as a result of a userspace call then the entry is marked
7195 * as non-persistent using the flash_state field.
7197 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7198 struct dev_db_entry *fw_ddb_entry,
7199 uint16_t *idx, int user)
7201 struct iscsi_bus_flash_session *fnode_sess = NULL;
7202 struct iscsi_bus_flash_conn *fnode_conn = NULL;
7203 int rc = QLA_ERROR;
7205 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
7206 &qla4xxx_iscsi_transport, 0);
7207 if (!fnode_sess) {
7208 ql4_printk(KERN_ERR, ha,
7209 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
7210 __func__, *idx, ha->host_no);
7211 goto exit_tgt_create;
7214 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
7215 &qla4xxx_iscsi_transport, 0);
7216 if (!fnode_conn) {
7217 ql4_printk(KERN_ERR, ha,
7218 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
7219 __func__, *idx, ha->host_no);
7220 goto free_sess;
7223 if (user) {
7224 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
7225 } else {
7226 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7228 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
7229 fnode_sess->is_boot_target = 1;
7230 else
7231 fnode_sess->is_boot_target = 0;
7234 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7235 fw_ddb_entry);
7236 if (rc)
7237 goto free_sess;
7239 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7240 __func__, fnode_sess->dev.kobj.name);
7242 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7243 __func__, fnode_conn->dev.kobj.name);
7245 return QLA_SUCCESS;
7247 free_sess:
7248 iscsi_destroy_flashnode_sess(fnode_sess);
7250 exit_tgt_create:
7251 return QLA_ERROR;
7255 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
7256 * @shost: pointer to host
7257 * @buf: type of ddb entry (ipv4/ipv6)
7258 * @len: length of buf
7260 * This creates a new ddb entry in the flash by finding the first free index,
7261 * storing a default ddb there, and then creating a sysfs entry for the new ddb entry.
7263 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
7264 int len)
7266 struct scsi_qla_host *ha = to_qla_host(shost);
7267 struct dev_db_entry *fw_ddb_entry = NULL;
7268 dma_addr_t fw_ddb_entry_dma;
7269 struct device *dev;
7270 uint16_t idx = 0;
7271 uint16_t max_ddbs = 0;
7272 uint32_t options = 0;
7273 uint32_t rval = QLA_ERROR;
7275 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
7276 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
7277 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
7278 __func__));
7279 goto exit_ddb_add;
7282 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7283 MAX_DEV_DB_ENTRIES;
7285 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7286 &fw_ddb_entry_dma, GFP_KERNEL);
7287 if (!fw_ddb_entry) {
7288 DEBUG2(ql4_printk(KERN_ERR, ha,
7289 "%s: Unable to allocate dma buffer\n",
7290 __func__));
7291 goto exit_ddb_add;
7294 dev = iscsi_find_flashnode_sess(ha->host, NULL,
7295 qla4xxx_sysfs_ddb_is_non_persistent);
7296 if (dev) {
7297 ql4_printk(KERN_ERR, ha,
7298 "%s: A non-persistent entry %s found\n",
7299 __func__, dev->kobj.name);
7300 put_device(dev);
7301 goto exit_ddb_add;
7304 /* Index 0 and 1 are reserved for boot target entries */
7305 for (idx = 2; idx < max_ddbs; idx++) {
7306 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
7307 fw_ddb_entry_dma, idx))
7308 break;
7311 if (idx == max_ddbs)
7312 goto exit_ddb_add;
7314 if (!strncasecmp("ipv6", buf, 4))
7315 options |= IPV6_DEFAULT_DDB_ENTRY;
7317 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7318 if (rval == QLA_ERROR)
7319 goto exit_ddb_add;
7321 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
7323 exit_ddb_add:
7324 if (fw_ddb_entry)
7325 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7326 fw_ddb_entry, fw_ddb_entry_dma);
7327 if (rval == QLA_SUCCESS)
7328 return idx;
7329 else
7330 return -EIO;
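/*
 * Editor's note (usage illustration, assuming open-iscsi's flashnode
 * support): this entry point is reached from userspace through the iSCSI
 * flashnode interface, e.g. something along the lines of
 *	iscsiadm -m host -H <host#> -C flashnode -o new -A ipv4
 * which is expected to end up calling qla4xxx_sysfs_ddb_add() with "ipv4"
 * or "ipv6" in buf.
 */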
7334 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
7335 * @fnode_sess: pointer to session attrs of flash ddb entry
7336 * @fnode_conn: pointer to connection attrs of flash ddb entry
7338 * This writes the contents of target ddb buffer to Flash with a valid cookie
7339 * value in order to make the ddb entry persistent.
7341 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
7342 struct iscsi_bus_flash_conn *fnode_conn)
7344 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7345 struct scsi_qla_host *ha = to_qla_host(shost);
7346 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7347 struct dev_db_entry *fw_ddb_entry = NULL;
7348 dma_addr_t fw_ddb_entry_dma;
7349 uint32_t options = 0;
7350 int rval = 0;
7352 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7353 &fw_ddb_entry_dma, GFP_KERNEL);
7354 if (!fw_ddb_entry) {
7355 DEBUG2(ql4_printk(KERN_ERR, ha,
7356 "%s: Unable to allocate dma buffer\n",
7357 __func__));
7358 rval = -ENOMEM;
7359 goto exit_ddb_apply;
7362 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7363 options |= IPV6_DEFAULT_DDB_ENTRY;
7365 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7366 if (rval == QLA_ERROR)
7367 goto exit_ddb_apply;
7369 dev_db_start_offset += (fnode_sess->target_id *
7370 sizeof(*fw_ddb_entry));
7372 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7373 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7375 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7376 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
7378 if (rval == QLA_SUCCESS) {
7379 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7380 ql4_printk(KERN_INFO, ha,
7381 "%s: flash node %u of host %lu written to flash\n",
7382 __func__, fnode_sess->target_id, ha->host_no);
7383 } else {
7384 rval = -EIO;
7385 ql4_printk(KERN_ERR, ha,
7386 "%s: Error while writing flash node %u of host %lu to flash\n",
7387 __func__, fnode_sess->target_id, ha->host_no);
7390 exit_ddb_apply:
7391 if (fw_ddb_entry)
7392 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7393 fw_ddb_entry, fw_ddb_entry_dma);
7394 return rval;
7397 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
7398 struct dev_db_entry *fw_ddb_entry,
7399 uint16_t idx)
7401 struct dev_db_entry *ddb_entry = NULL;
7402 dma_addr_t ddb_entry_dma;
7403 unsigned long wtime;
7404 uint32_t mbx_sts = 0;
7405 uint32_t state = 0, conn_err = 0;
7406 uint16_t tmo = 0;
7407 int ret = 0;
7409 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7410 &ddb_entry_dma, GFP_KERNEL);
7411 if (!ddb_entry) {
7412 DEBUG2(ql4_printk(KERN_ERR, ha,
7413 "%s: Unable to allocate dma buffer\n",
7414 __func__));
7415 return QLA_ERROR;
7418 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
7420 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
7421 if (ret != QLA_SUCCESS) {
7422 DEBUG2(ql4_printk(KERN_ERR, ha,
7423 "%s: Unable to set ddb entry for index %d\n",
7424 __func__, idx));
7425 goto exit_ddb_conn_open;
7428 qla4xxx_conn_open(ha, idx);
7430 /* To ensure that sendtargets is done, wait for at least 12 secs */
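/* Use the adapter's configured default timeout when it falls between
 * LOGIN_TOV and LOGIN_TOV * 10; otherwise fall back to LOGIN_TOV.
 */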
7431 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7432 (ha->def_timeout < LOGIN_TOV * 10) ?
7433 ha->def_timeout : LOGIN_TOV);
7435 DEBUG2(ql4_printk(KERN_INFO, ha,
7436 "Default time to wait for login to ddb %d\n", tmo));
7438 wtime = jiffies + (HZ * tmo);
7439 do {
7440 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
7441 NULL, &state, &conn_err, NULL,
7442 NULL);
7443 if (ret == QLA_ERROR)
7444 continue;
7446 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
7447 state == DDB_DS_SESSION_FAILED)
7448 break;
7450 schedule_timeout_uninterruptible(HZ / 10);
7451 } while (time_after(wtime, jiffies));
7453 exit_ddb_conn_open:
7454 if (ddb_entry)
7455 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7456 ddb_entry, ddb_entry_dma);
7457 return ret;
7460 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
7461 struct dev_db_entry *fw_ddb_entry,
7462 uint16_t target_id)
7464 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
7465 struct list_head list_nt;
7466 uint16_t ddb_index;
7467 int ret = 0;
7469 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
7470 ql4_printk(KERN_WARNING, ha,
7471 "%s: A discovery already in progress!\n", __func__);
7472 return QLA_ERROR;
7475 INIT_LIST_HEAD(&list_nt);
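/* Reserve a temporary DDB index for the SendTargets entry, open the
 * discovery connection, then collect the discovered normal-target
 * entries and release the temporary resources.
 */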
7477 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7479 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
7480 if (ret == QLA_ERROR)
7481 goto exit_login_st_clr_bit;
7483 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
7484 if (ret == QLA_ERROR)
7485 goto exit_login_st;
7487 qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
7489 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
7490 list_del_init(&ddb_idx->list);
7491 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
7492 vfree(ddb_idx);
7495 exit_login_st:
7496 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
7497 ql4_printk(KERN_ERR, ha,
7498 "Unable to clear DDB index = 0x%x\n", ddb_index);
7501 clear_bit(ddb_index, ha->ddb_idx_map);
7503 exit_login_st_clr_bit:
7504 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7505 return ret;
7508 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
7509 struct dev_db_entry *fw_ddb_entry,
7510 uint16_t idx)
7512 int ret = QLA_ERROR;
7514 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7515 if (ret != QLA_SUCCESS)
7516 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7517 idx);
7518 else
7519 ret = -EPERM;
7521 return ret;
7525 * qla4xxx_sysfs_ddb_login - Login to the specified target
7526 * @fnode_sess: pointer to session attrs of flash ddb entry
7527 * @fnode_conn: pointer to connection attrs of flash ddb entry
7529 * This logs in to the specified target
7531 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
7532 struct iscsi_bus_flash_conn *fnode_conn)
7534 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7535 struct scsi_qla_host *ha = to_qla_host(shost);
7536 struct dev_db_entry *fw_ddb_entry = NULL;
7537 dma_addr_t fw_ddb_entry_dma;
7538 uint32_t options = 0;
7539 int ret = 0;
7541 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
7542 ql4_printk(KERN_ERR, ha,
7543 "%s: Target info is not persistent\n", __func__);
7544 ret = -EIO;
7545 goto exit_ddb_login;
7548 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7549 &fw_ddb_entry_dma, GFP_KERNEL);
7550 if (!fw_ddb_entry) {
7551 DEBUG2(ql4_printk(KERN_ERR, ha,
7552 "%s: Unable to allocate dma buffer\n",
7553 __func__));
7554 ret = -ENOMEM;
7555 goto exit_ddb_login;
7558 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7559 options |= IPV6_DEFAULT_DDB_ENTRY;
7561 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7562 if (ret == QLA_ERROR)
7563 goto exit_ddb_login;
7565 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7566 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
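/* An empty iSCSI name identifies a SendTargets (discovery) entry;
 * log in via the ST path, otherwise treat it as a normal target.
 */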
7568 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7569 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
7570 fnode_sess->target_id);
7571 else
7572 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
7573 fnode_sess->target_id);
7575 if (ret > 0)
7576 ret = -EIO;
7578 exit_ddb_login:
7579 if (fw_ddb_entry)
7580 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7581 fw_ddb_entry, fw_ddb_entry_dma);
7582 return ret;
7586 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
7587 * @cls_sess: pointer to session to be logged out
7589 * This performs session log out from the specified target
7591 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
7593 struct iscsi_session *sess;
7594 struct ddb_entry *ddb_entry = NULL;
7595 struct scsi_qla_host *ha;
7596 struct dev_db_entry *fw_ddb_entry = NULL;
7597 dma_addr_t fw_ddb_entry_dma;
7598 unsigned long flags;
7599 unsigned long wtime;
7600 uint32_t ddb_state;
7601 int options;
7602 int ret = 0;
7604 sess = cls_sess->dd_data;
7605 ddb_entry = sess->dd_data;
7606 ha = ddb_entry->ha;
7608 if (ddb_entry->ddb_type != FLASH_DDB) {
7609 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
7610 __func__);
7611 ret = -ENXIO;
7612 goto exit_ddb_logout;
7615 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7616 ql4_printk(KERN_ERR, ha,
7617 "%s: Logout from boot target entry is not permitted.\n",
7618 __func__);
7619 ret = -EPERM;
7620 goto exit_ddb_logout;
7623 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7624 &fw_ddb_entry_dma, GFP_KERNEL);
7625 if (!fw_ddb_entry) {
7626 ql4_printk(KERN_ERR, ha,
7627 "%s: Unable to allocate dma buffer\n", __func__);
7628 ret = -ENOMEM;
7629 goto exit_ddb_logout;
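/* If relogin is already disabled for this entry, skip the wait below
 * and proceed straight to the logout sequence.
 */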
7632 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
7633 goto ddb_logout_init;
7635 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7636 fw_ddb_entry, fw_ddb_entry_dma,
7637 NULL, NULL, &ddb_state, NULL,
7638 NULL, NULL);
7639 if (ret == QLA_ERROR)
7640 goto ddb_logout_init;
7642 if (ddb_state == DDB_DS_SESSION_ACTIVE)
7643 goto ddb_logout_init;
7645 /* Wait until the next relogin is triggered via DF_RELOGIN and
7646 * clear DF_RELOGIN to avoid triggering any further relogins.
7647 */
7648 wtime = jiffies + (HZ * RELOGIN_TOV);
7649 do {
7650 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
7651 goto ddb_logout_init;
7653 schedule_timeout_uninterruptible(HZ);
7654 } while ((time_after(wtime, jiffies)));
7656 ddb_logout_init:
7657 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
7658 atomic_set(&ddb_entry->relogin_timer, 0);
7660 options = LOGOUT_OPTION_CLOSE_SESSION;
7661 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
7663 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
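/* Poll the firmware DDB state for up to LOGOUT_TOV seconds, until the
 * connection is reported closed or the session is reported failed.
 */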
7664 wtime = jiffies + (HZ * LOGOUT_TOV);
7665 do {
7666 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7667 fw_ddb_entry, fw_ddb_entry_dma,
7668 NULL, NULL, &ddb_state, NULL,
7669 NULL, NULL);
7670 if (ret == QLA_ERROR)
7671 goto ddb_logout_clr_sess;
7673 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
7674 (ddb_state == DDB_DS_SESSION_FAILED))
7675 goto ddb_logout_clr_sess;
7677 schedule_timeout_uninterruptible(HZ);
7678 } while ((time_after(wtime, jiffies)));
7680 ddb_logout_clr_sess:
7681 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7682 /*
7683 * We decremented the driver's reference count when we set up the
7684 * session so that driver unload is seamless without actually
7685 * destroying the session; take that reference back before tearing
7686 * the session down.
7687 */
7688 try_module_get(qla4xxx_iscsi_transport.owner);
7689 iscsi_destroy_endpoint(ddb_entry->conn->ep);
7691 spin_lock_irqsave(&ha->hardware_lock, flags);
7692 qla4xxx_free_ddb(ha, ddb_entry);
7693 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
7694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7696 iscsi_session_teardown(ddb_entry->sess);
7698 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
7699 ret = QLA_SUCCESS;
7701 exit_ddb_logout:
7702 if (fw_ddb_entry)
7703 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7704 fw_ddb_entry, fw_ddb_entry_dma);
7705 return ret;
7709 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
7710 * @fnode_sess: pointer to session attrs of flash ddb entry
7711 * @fnode_conn: pointer to connection attrs of flash ddb entry
7713 * This performs log out from the specified target
7715 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
7716 struct iscsi_bus_flash_conn *fnode_conn)
7718 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7719 struct scsi_qla_host *ha = to_qla_host(shost);
7720 struct ql4_tuple_ddb *flash_tddb = NULL;
7721 struct ql4_tuple_ddb *tmp_tddb = NULL;
7722 struct dev_db_entry *fw_ddb_entry = NULL;
7723 struct ddb_entry *ddb_entry = NULL;
7724 dma_addr_t fw_ddb_dma;
7725 uint32_t next_idx = 0;
7726 uint32_t state = 0, conn_err = 0;
7727 uint16_t conn_id = 0;
7728 int idx, index;
7729 int status, ret = 0;
7731 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7732 &fw_ddb_dma);
7733 if (fw_ddb_entry == NULL) {
7734 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
7735 ret = -ENOMEM;
7736 goto exit_ddb_logout;
7739 flash_tddb = vzalloc(sizeof(*flash_tddb));
7740 if (!flash_tddb) {
7741 ql4_printk(KERN_WARNING, ha,
7742 "%s:Memory Allocation failed.\n", __func__);
7743 ret = -ENOMEM;
7744 goto exit_ddb_logout;
7747 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
7748 if (!tmp_tddb) {
7749 ql4_printk(KERN_WARNING, ha,
7750 "%s:Memory Allocation failed.\n", __func__);
7751 ret = -ENOMEM;
7752 goto exit_ddb_logout;
7755 if (!fnode_sess->targetname) {
7756 ql4_printk(KERN_ERR, ha,
7757 "%s:Cannot logout from SendTarget entry\n",
7758 __func__);
7759 ret = -EPERM;
7760 goto exit_ddb_logout;
7763 if (fnode_sess->is_boot_target) {
7764 ql4_printk(KERN_ERR, ha,
7765 "%s: Logout from boot target entry is not permitted.\n",
7766 __func__);
7767 ret = -EPERM;
7768 goto exit_ddb_logout;
7771 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
7772 ISCSI_NAME_SIZE);
7774 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7775 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
7776 else
7777 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
7779 flash_tddb->tpgt = fnode_sess->tpgt;
7780 flash_tddb->port = fnode_conn->port;
7782 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
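/* Walk all active DDB indexes, compare each firmware DDB (name, address,
 * port, tpgt, ISID) against the requested flash node, and log out the
 * matching session.
 */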
7784 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7785 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7786 if (ddb_entry == NULL)
7787 continue;
7789 if (ddb_entry->ddb_type != FLASH_DDB)
7790 continue;
7792 index = ddb_entry->sess->target_id;
7793 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
7794 fw_ddb_dma, NULL, &next_idx,
7795 &state, &conn_err, NULL,
7796 &conn_id);
7797 if (status == QLA_ERROR) {
7798 ret = -ENOMEM;
7799 break;
7802 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
7804 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
7805 true);
7806 if (status == QLA_SUCCESS) {
7807 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
7808 break;
7812 if (idx == MAX_DDB_ENTRIES)
7813 ret = -ESRCH;
7815 exit_ddb_logout:
7817 vfree(flash_tddb);
7819 vfree(tmp_tddb);
7820 if (fw_ddb_entry)
7821 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7823 return ret;
7826 static int
7827 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
7828 int param, char *buf)
7830 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7831 struct scsi_qla_host *ha = to_qla_host(shost);
7832 struct iscsi_bus_flash_conn *fnode_conn;
7833 struct ql4_chap_table chap_tbl;
7834 struct device *dev;
7835 int parent_type;
7836 int rc = 0;
7838 dev = iscsi_find_flashnode_conn(fnode_sess);
7839 if (!dev)
7840 return -EIO;
7842 fnode_conn = iscsi_dev_to_flash_conn(dev);
7844 switch (param) {
7845 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
7846 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
7847 break;
7848 case ISCSI_FLASHNODE_PORTAL_TYPE:
7849 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
7850 break;
7851 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
7852 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
7853 break;
7854 case ISCSI_FLASHNODE_DISCOVERY_SESS:
7855 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
7856 break;
7857 case ISCSI_FLASHNODE_ENTRY_EN:
7858 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
7859 break;
7860 case ISCSI_FLASHNODE_HDR_DGST_EN:
7861 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
7862 break;
7863 case ISCSI_FLASHNODE_DATA_DGST_EN:
7864 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
7865 break;
7866 case ISCSI_FLASHNODE_IMM_DATA_EN:
7867 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
7868 break;
7869 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
7870 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
7871 break;
7872 case ISCSI_FLASHNODE_DATASEQ_INORDER:
7873 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
7874 break;
7875 case ISCSI_FLASHNODE_PDU_INORDER:
7876 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
7877 break;
7878 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
7879 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
7880 break;
7881 case ISCSI_FLASHNODE_SNACK_REQ_EN:
7882 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
7883 break;
7884 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
7885 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
7886 break;
7887 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
7888 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
7889 break;
7890 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
7891 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
7892 break;
7893 case ISCSI_FLASHNODE_ERL:
7894 rc = sprintf(buf, "%u\n", fnode_sess->erl);
7895 break;
7896 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
7897 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
7898 break;
7899 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
7900 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
7901 break;
7902 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
7903 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
7904 break;
7905 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
7906 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
7907 break;
7908 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
7909 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
7910 break;
7911 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
7912 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
7913 break;
7914 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
7915 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
7916 break;
7917 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
7918 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
7919 break;
7920 case ISCSI_FLASHNODE_FIRST_BURST:
7921 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
7922 break;
7923 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7924 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
7925 break;
7926 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7927 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
7928 break;
7929 case ISCSI_FLASHNODE_MAX_R2T:
7930 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
7931 break;
7932 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7933 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
7934 break;
7935 case ISCSI_FLASHNODE_ISID:
7936 rc = sprintf(buf, "%pm\n", fnode_sess->isid);
7937 break;
7938 case ISCSI_FLASHNODE_TSID:
7939 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
7940 break;
7941 case ISCSI_FLASHNODE_PORT:
7942 rc = sprintf(buf, "%d\n", fnode_conn->port);
7943 break;
7944 case ISCSI_FLASHNODE_MAX_BURST:
7945 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
7946 break;
7947 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7948 rc = sprintf(buf, "%u\n",
7949 fnode_sess->default_taskmgmt_timeout);
7950 break;
7951 case ISCSI_FLASHNODE_IPADDR:
7952 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7953 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
7954 else
7955 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
7956 break;
7957 case ISCSI_FLASHNODE_ALIAS:
7958 if (fnode_sess->targetalias)
7959 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
7960 else
7961 rc = sprintf(buf, "\n");
7962 break;
7963 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7964 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7965 rc = sprintf(buf, "%pI6\n",
7966 fnode_conn->redirect_ipaddr);
7967 else
7968 rc = sprintf(buf, "%pI4\n",
7969 fnode_conn->redirect_ipaddr);
7970 break;
7971 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7972 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
7973 break;
7974 case ISCSI_FLASHNODE_LOCAL_PORT:
7975 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
7976 break;
7977 case ISCSI_FLASHNODE_IPV4_TOS:
7978 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
7979 break;
7980 case ISCSI_FLASHNODE_IPV6_TC:
7981 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7982 rc = sprintf(buf, "%u\n",
7983 fnode_conn->ipv6_traffic_class);
7984 else
7985 rc = sprintf(buf, "\n");
7986 break;
7987 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
7988 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
7989 break;
7990 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
7991 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7992 rc = sprintf(buf, "%pI6\n",
7993 fnode_conn->link_local_ipv6_addr);
7994 else
7995 rc = sprintf(buf, "\n");
7996 break;
7997 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
7998 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
7999 break;
8000 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
8001 if (fnode_sess->discovery_parent_type == DDB_ISNS)
8002 parent_type = ISCSI_DISC_PARENT_ISNS;
8003 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
8004 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8005 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
8006 parent_type = ISCSI_DISC_PARENT_SENDTGT;
8007 else
8008 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8010 rc = sprintf(buf, "%s\n",
8011 iscsi_get_discovery_parent_name(parent_type));
8012 break;
8013 case ISCSI_FLASHNODE_NAME:
8014 if (fnode_sess->targetname)
8015 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
8016 else
8017 rc = sprintf(buf, "\n");
8018 break;
8019 case ISCSI_FLASHNODE_TPGT:
8020 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
8021 break;
8022 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8023 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
8024 break;
8025 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8026 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
8027 break;
8028 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8029 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
8030 break;
8031 case ISCSI_FLASHNODE_USERNAME:
8032 if (fnode_sess->chap_auth_en) {
8033 qla4xxx_get_uni_chap_at_index(ha,
8034 chap_tbl.name,
8035 chap_tbl.secret,
8036 fnode_sess->chap_out_idx);
8037 rc = sprintf(buf, "%s\n", chap_tbl.name);
8038 } else {
8039 rc = sprintf(buf, "\n");
8041 break;
8042 case ISCSI_FLASHNODE_PASSWORD:
8043 if (fnode_sess->chap_auth_en) {
8044 qla4xxx_get_uni_chap_at_index(ha,
8045 chap_tbl.name,
8046 chap_tbl.secret,
8047 fnode_sess->chap_out_idx);
8048 rc = sprintf(buf, "%s\n", chap_tbl.secret);
8049 } else {
8050 rc = sprintf(buf, "\n");
8052 break;
8053 case ISCSI_FLASHNODE_STATSN:
8054 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
8055 break;
8056 case ISCSI_FLASHNODE_EXP_STATSN:
8057 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
8058 break;
8059 case ISCSI_FLASHNODE_IS_BOOT_TGT:
8060 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
8061 break;
8062 default:
8063 rc = -ENOSYS;
8064 break;
8067 put_device(dev);
8068 return rc;
8072 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
8073 * @fnode_sess: pointer to session attrs of flash ddb entry
8074 * @fnode_conn: pointer to connection attrs of flash ddb entry
8075 * @data: Parameters and their values to update
8076 * @len: length of data
8078 * This sets the parameters of the flash ddb entry and writes them to flash.
8080 static int
8081 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
8082 struct iscsi_bus_flash_conn *fnode_conn,
8083 void *data, int len)
8085 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8086 struct scsi_qla_host *ha = to_qla_host(shost);
8087 struct iscsi_flashnode_param_info *fnode_param;
8088 struct ql4_chap_table chap_tbl;
8089 struct nlattr *attr;
8090 uint16_t chap_out_idx = INVALID_ENTRY;
8091 int rc = QLA_ERROR;
8092 uint32_t rem = len;
8094 memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
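/* Each netlink attribute carries one iscsi_flashnode_param_info; apply
 * every parameter to the flash session/connection attributes, then commit
 * the whole entry to flash via qla4xxx_sysfs_ddb_apply().
 */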
8095 nla_for_each_attr(attr, data, len, rem) {
8096 fnode_param = nla_data(attr);
8098 switch (fnode_param->param) {
8099 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
8100 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
8101 break;
8102 case ISCSI_FLASHNODE_PORTAL_TYPE:
8103 memcpy(fnode_sess->portal_type, fnode_param->value,
8104 strlen(fnode_sess->portal_type));
8105 break;
8106 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
8107 fnode_sess->auto_snd_tgt_disable =
8108 fnode_param->value[0];
8109 break;
8110 case ISCSI_FLASHNODE_DISCOVERY_SESS:
8111 fnode_sess->discovery_sess = fnode_param->value[0];
8112 break;
8113 case ISCSI_FLASHNODE_ENTRY_EN:
8114 fnode_sess->entry_state = fnode_param->value[0];
8115 break;
8116 case ISCSI_FLASHNODE_HDR_DGST_EN:
8117 fnode_conn->hdrdgst_en = fnode_param->value[0];
8118 break;
8119 case ISCSI_FLASHNODE_DATA_DGST_EN:
8120 fnode_conn->datadgst_en = fnode_param->value[0];
8121 break;
8122 case ISCSI_FLASHNODE_IMM_DATA_EN:
8123 fnode_sess->imm_data_en = fnode_param->value[0];
8124 break;
8125 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
8126 fnode_sess->initial_r2t_en = fnode_param->value[0];
8127 break;
8128 case ISCSI_FLASHNODE_DATASEQ_INORDER:
8129 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
8130 break;
8131 case ISCSI_FLASHNODE_PDU_INORDER:
8132 fnode_sess->pdu_inorder_en = fnode_param->value[0];
8133 break;
8134 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
8135 fnode_sess->chap_auth_en = fnode_param->value[0];
8136 /* Invalidate chap index if chap auth is disabled */
8137 if (!fnode_sess->chap_auth_en)
8138 fnode_sess->chap_out_idx = INVALID_ENTRY;
8140 break;
8141 case ISCSI_FLASHNODE_SNACK_REQ_EN:
8142 fnode_conn->snack_req_en = fnode_param->value[0];
8143 break;
8144 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
8145 fnode_sess->discovery_logout_en = fnode_param->value[0];
8146 break;
8147 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
8148 fnode_sess->bidi_chap_en = fnode_param->value[0];
8149 break;
8150 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
8151 fnode_sess->discovery_auth_optional =
8152 fnode_param->value[0];
8153 break;
8154 case ISCSI_FLASHNODE_ERL:
8155 fnode_sess->erl = fnode_param->value[0];
8156 break;
8157 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
8158 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
8159 break;
8160 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
8161 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
8162 break;
8163 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
8164 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
8165 break;
8166 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
8167 fnode_conn->tcp_timer_scale = fnode_param->value[0];
8168 break;
8169 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
8170 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
8171 break;
8172 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
8173 fnode_conn->fragment_disable = fnode_param->value[0];
8174 break;
8175 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
8176 fnode_conn->max_recv_dlength =
8177 *(unsigned *)fnode_param->value;
8178 break;
8179 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
8180 fnode_conn->max_xmit_dlength =
8181 *(unsigned *)fnode_param->value;
8182 break;
8183 case ISCSI_FLASHNODE_FIRST_BURST:
8184 fnode_sess->first_burst =
8185 *(unsigned *)fnode_param->value;
8186 break;
8187 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
8188 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
8189 break;
8190 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
8191 fnode_sess->time2retain =
8192 *(uint16_t *)fnode_param->value;
8193 break;
8194 case ISCSI_FLASHNODE_MAX_R2T:
8195 fnode_sess->max_r2t =
8196 *(uint16_t *)fnode_param->value;
8197 break;
8198 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
8199 fnode_conn->keepalive_timeout =
8200 *(uint16_t *)fnode_param->value;
8201 break;
8202 case ISCSI_FLASHNODE_ISID:
8203 memcpy(fnode_sess->isid, fnode_param->value,
8204 sizeof(fnode_sess->isid));
8205 break;
8206 case ISCSI_FLASHNODE_TSID:
8207 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
8208 break;
8209 case ISCSI_FLASHNODE_PORT:
8210 fnode_conn->port = *(uint16_t *)fnode_param->value;
8211 break;
8212 case ISCSI_FLASHNODE_MAX_BURST:
8213 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
8214 break;
8215 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
8216 fnode_sess->default_taskmgmt_timeout =
8217 *(uint16_t *)fnode_param->value;
8218 break;
8219 case ISCSI_FLASHNODE_IPADDR:
8220 memcpy(fnode_conn->ipaddress, fnode_param->value,
8221 IPv6_ADDR_LEN);
8222 break;
8223 case ISCSI_FLASHNODE_ALIAS:
8224 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
8225 (char *)fnode_param->value);
8226 break;
8227 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
8228 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
8229 IPv6_ADDR_LEN);
8230 break;
8231 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
8232 fnode_conn->max_segment_size =
8233 *(unsigned *)fnode_param->value;
8234 break;
8235 case ISCSI_FLASHNODE_LOCAL_PORT:
8236 fnode_conn->local_port =
8237 *(uint16_t *)fnode_param->value;
8238 break;
8239 case ISCSI_FLASHNODE_IPV4_TOS:
8240 fnode_conn->ipv4_tos = fnode_param->value[0];
8241 break;
8242 case ISCSI_FLASHNODE_IPV6_TC:
8243 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
8244 break;
8245 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
8246 fnode_conn->ipv6_flow_label = fnode_param->value[0];
8247 break;
8248 case ISCSI_FLASHNODE_NAME:
8249 rc = iscsi_switch_str_param(&fnode_sess->targetname,
8250 (char *)fnode_param->value);
8251 break;
8252 case ISCSI_FLASHNODE_TPGT:
8253 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
8254 break;
8255 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
8256 memcpy(fnode_conn->link_local_ipv6_addr,
8257 fnode_param->value, IPv6_ADDR_LEN);
8258 break;
8259 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
8260 fnode_sess->discovery_parent_idx =
8261 *(uint16_t *)fnode_param->value;
8262 break;
8263 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8264 fnode_conn->tcp_xmit_wsf =
8265 *(uint8_t *)fnode_param->value;
8266 break;
8267 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8268 fnode_conn->tcp_recv_wsf =
8269 *(uint8_t *)fnode_param->value;
8270 break;
8271 case ISCSI_FLASHNODE_STATSN:
8272 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
8273 break;
8274 case ISCSI_FLASHNODE_EXP_STATSN:
8275 fnode_conn->exp_statsn =
8276 *(uint32_t *)fnode_param->value;
8277 break;
8278 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8279 chap_out_idx = *(uint16_t *)fnode_param->value;
8280 if (!qla4xxx_get_uni_chap_at_index(ha,
8281 chap_tbl.name,
8282 chap_tbl.secret,
8283 chap_out_idx)) {
8284 fnode_sess->chap_out_idx = chap_out_idx;
8285 /* Enable chap auth if chap index is valid */
8286 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
8288 break;
8289 default:
8290 ql4_printk(KERN_ERR, ha,
8291 "%s: No such sysfs attribute\n", __func__);
8292 rc = -ENOSYS;
8293 goto exit_set_param;
8297 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
8299 exit_set_param:
8300 return rc;
8304 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
8305 * @fnode_sess: pointer to session attrs of flash ddb entry
8307 * This invalidates the flash ddb entry at the given index
8309 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
8311 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8312 struct scsi_qla_host *ha = to_qla_host(shost);
8313 uint32_t dev_db_start_offset;
8314 uint32_t dev_db_end_offset;
8315 struct dev_db_entry *fw_ddb_entry = NULL;
8316 dma_addr_t fw_ddb_entry_dma;
8317 uint16_t *ddb_cookie = NULL;
8318 size_t ddb_size = 0;
8319 void *pddb = NULL;
8320 int target_id;
8321 int rc = 0;
8323 if (fnode_sess->is_boot_target) {
8324 rc = -EPERM;
8325 DEBUG2(ql4_printk(KERN_ERR, ha,
8326 "%s: Deletion of boot target entry is not permitted.\n",
8327 __func__));
8328 goto exit_ddb_del;
8331 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
8332 goto sysfs_ddb_del;
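/* Compute the flash location to invalidate: 40XX adapters rewrite the
 * whole (zeroed) entry in the fixed DDB region, while newer adapters
 * locate the DDB table via the FLT and overwrite only the cookie field.
 */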
8334 if (is_qla40XX(ha)) {
8335 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
8336 dev_db_end_offset = FLASH_OFFSET_DB_END;
8337 dev_db_start_offset += (fnode_sess->target_id *
8338 sizeof(*fw_ddb_entry));
8339 ddb_size = sizeof(*fw_ddb_entry);
8340 } else {
8341 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
8342 (ha->hw.flt_region_ddb << 2);
8343 /* flt_ddb_size is the DDB table size for both ports,
8344 * so divide it by 2 to calculate the offset for the second port.
8345 */
8346 if (ha->port_num == 1)
8347 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
8349 dev_db_end_offset = dev_db_start_offset +
8350 (ha->hw.flt_ddb_size / 2);
8352 dev_db_start_offset += (fnode_sess->target_id *
8353 sizeof(*fw_ddb_entry));
8354 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
8356 ddb_size = sizeof(*ddb_cookie);
8359 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
8360 __func__, dev_db_start_offset, dev_db_end_offset));
8362 if (dev_db_start_offset > dev_db_end_offset) {
8363 rc = -EIO;
8364 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
8365 __func__, fnode_sess->target_id));
8366 goto exit_ddb_del;
8369 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
8370 &fw_ddb_entry_dma, GFP_KERNEL);
8371 if (!pddb) {
8372 rc = -ENOMEM;
8373 DEBUG2(ql4_printk(KERN_ERR, ha,
8374 "%s: Unable to allocate dma buffer\n",
8375 __func__));
8376 goto exit_ddb_del;
8379 if (is_qla40XX(ha)) {
8380 fw_ddb_entry = pddb;
8381 memset(fw_ddb_entry, 0, ddb_size);
8382 ddb_cookie = &fw_ddb_entry->cookie;
8383 } else {
8384 ddb_cookie = pddb;
8387 /* invalidate the cookie */
8388 *ddb_cookie = 0xFFEE;
8389 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
8390 ddb_size, FLASH_OPT_RMW_COMMIT);
8392 sysfs_ddb_del:
8393 target_id = fnode_sess->target_id;
8394 iscsi_destroy_flashnode_sess(fnode_sess);
8395 ql4_printk(KERN_INFO, ha,
8396 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
8397 __func__, target_id, ha->host_no);
8398 exit_ddb_del:
8399 if (pddb)
8400 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
8401 fw_ddb_entry_dma);
8402 return rc;
8406 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
8407 * @ha: pointer to adapter structure
8409 * Export the firmware DDB for all send targets and normal targets to sysfs.
8411 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
8413 struct dev_db_entry *fw_ddb_entry = NULL;
8414 dma_addr_t fw_ddb_entry_dma;
8415 uint16_t max_ddbs;
8416 uint16_t idx = 0;
8417 int ret = QLA_SUCCESS;
8419 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
8420 sizeof(*fw_ddb_entry),
8421 &fw_ddb_entry_dma, GFP_KERNEL);
8422 if (!fw_ddb_entry) {
8423 DEBUG2(ql4_printk(KERN_ERR, ha,
8424 "%s: Unable to allocate dma buffer\n",
8425 __func__));
8426 return -ENOMEM;
8429 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
8430 MAX_DEV_DB_ENTRIES;
8432 for (idx = 0; idx < max_ddbs; idx++) {
8433 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
8434 idx))
8435 continue;
8437 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
8438 if (ret) {
8439 ret = -EIO;
8440 break;
8444 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
8445 fw_ddb_entry_dma);
8447 return ret;
8450 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
8452 iscsi_destroy_all_flashnode(ha->host);
8456 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
8457 * @ha: pointer to adapter structure
8458 * @is_reset: Is this init path or reset path
8460 * Create a list of sendtargets (st) from firmware DDBs, issue the send targets
8461 * using connection open, then create the list of normal targets (nt)
8462 * from firmware DDBs. Based on the nt list, set up session and connection
8463 * objects.
8465 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
8467 uint16_t tmo = 0;
8468 struct list_head list_st, list_nt;
8469 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
8470 unsigned long wtime;
8472 if (!test_bit(AF_LINK_UP, &ha->flags)) {
8473 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
8474 ha->is_reset = is_reset;
8475 return;
8478 INIT_LIST_HEAD(&list_st);
8479 INIT_LIST_HEAD(&list_nt);
8481 qla4xxx_build_st_list(ha, &list_st);
8483 /* Before issuing the conn open mbox, ensure all IP states are configured.
8484 * Note: conn open fails if the IPs are not configured.
8485 */
8486 qla4xxx_wait_for_ip_configuration(ha);
8488 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
8489 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
8490 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
8493 /* Wait a minimum of 12 seconds to ensure all sendtargets are done */
8494 tmo = ((ha->def_timeout > LOGIN_TOV) &&
8495 (ha->def_timeout < LOGIN_TOV * 10) ?
8496 ha->def_timeout : LOGIN_TOV);
8498 DEBUG2(ql4_printk(KERN_INFO, ha,
8499 "Default time to wait for build ddb %d\n", tmo));
8501 wtime = jiffies + (HZ * tmo);
8502 do {
8503 if (list_empty(&list_st))
8504 break;
8506 qla4xxx_remove_failed_ddb(ha, &list_st);
8507 schedule_timeout_uninterruptible(HZ / 10);
8508 } while (time_after(wtime, jiffies));
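/* With the sendtargets responses collected, build the normal target (nt)
 * list from the firmware DDBs and set up session/connection objects.
 */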
8511 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
8513 qla4xxx_free_ddb_list(&list_st);
8514 qla4xxx_free_ddb_list(&list_nt);
8516 qla4xxx_free_ddb_index(ha);
8520 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
8521 * response.
8522 * @ha: pointer to adapter structure
8524 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
8525 * set in the DDB and we wait for the login response of the boot targets
8526 * during probe.
8528 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
8530 struct ddb_entry *ddb_entry;
8531 struct dev_db_entry *fw_ddb_entry = NULL;
8532 dma_addr_t fw_ddb_entry_dma;
8533 unsigned long wtime;
8534 uint32_t ddb_state;
8535 int max_ddbs, idx, ret;
8537 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
8538 MAX_DEV_DB_ENTRIES;
8540 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8541 &fw_ddb_entry_dma, GFP_KERNEL);
8542 if (!fw_ddb_entry) {
8543 ql4_printk(KERN_ERR, ha,
8544 "%s: Unable to allocate dma buffer\n", __func__);
8545 goto exit_login_resp;
8548 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
8550 for (idx = 0; idx < max_ddbs; idx++) {
8551 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8552 if (ddb_entry == NULL)
8553 continue;
8555 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
8556 DEBUG2(ql4_printk(KERN_INFO, ha,
8557 "%s: DDB index [%d]\n", __func__,
8558 ddb_entry->fw_ddb_index));
8559 do {
8560 ret = qla4xxx_get_fwddb_entry(ha,
8561 ddb_entry->fw_ddb_index,
8562 fw_ddb_entry, fw_ddb_entry_dma,
8563 NULL, NULL, &ddb_state, NULL,
8564 NULL, NULL);
8565 if (ret == QLA_ERROR)
8566 goto exit_login_resp;
8568 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
8569 (ddb_state == DDB_DS_SESSION_FAILED))
8570 break;
8572 schedule_timeout_uninterruptible(HZ);
8574 } while ((time_after(wtime, jiffies)));
8576 if (!time_after(wtime, jiffies)) {
8577 DEBUG2(ql4_printk(KERN_INFO, ha,
8578 "%s: Login response wait timer expired\n",
8579 __func__));
8580 goto exit_login_resp;
8585 exit_login_resp:
8586 if (fw_ddb_entry)
8587 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8588 fw_ddb_entry, fw_ddb_entry_dma);
8592 * qla4xxx_probe_adapter - callback function to probe HBA
8593 * @pdev: pointer to pci_dev structure
8594 * @ent: pointer to the matching pci_device_id entry
8596 * This routine will probe for QLogic 4xxx iSCSI host adapters.
8597 * It returns zero if successful. It also initializes all data necessary for
8598 * the driver.
8600 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
8601 const struct pci_device_id *ent)
8603 int ret = -ENODEV, status;
8604 struct Scsi_Host *host;
8605 struct scsi_qla_host *ha;
8606 uint8_t init_retry_count = 0;
8607 char buf[34];
8608 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
8609 uint32_t dev_state;
8611 if (pci_enable_device(pdev))
8612 return -1;
8614 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
8615 if (host == NULL) {
8616 printk(KERN_WARNING
8617 "qla4xxx: Couldn't allocate host from scsi layer!\n");
8618 goto probe_disable_device;
8621 /* Clear our data area */
8622 ha = to_qla_host(host);
8623 memset(ha, 0, sizeof(*ha));
8625 /* Save the information from PCI BIOS. */
8626 ha->pdev = pdev;
8627 ha->host = host;
8628 ha->host_no = host->host_no;
8629 ha->func_num = PCI_FUNC(ha->pdev->devfn);
8631 pci_enable_pcie_error_reporting(pdev);
8633 /* Setup Runtime configurable options */
8634 if (is_qla8022(ha)) {
8635 ha->isp_ops = &qla4_82xx_isp_ops;
8636 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
8637 ha->qdr_sn_window = -1;
8638 ha->ddr_mn_window = -1;
8639 ha->curr_window = 255;
8640 nx_legacy_intr = &legacy_intr[ha->func_num];
8641 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
8642 ha->nx_legacy_intr.tgt_status_reg =
8643 nx_legacy_intr->tgt_status_reg;
8644 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
8645 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
8646 } else if (is_qla8032(ha) || is_qla8042(ha)) {
8647 ha->isp_ops = &qla4_83xx_isp_ops;
8648 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
8649 } else {
8650 ha->isp_ops = &qla4xxx_isp_ops;
8653 if (is_qla80XX(ha)) {
8654 rwlock_init(&ha->hw_lock);
8655 ha->pf_bit = ha->func_num << 16;
8656 /* Set EEH reset type to fundamental if required by hba */
8657 pdev->needs_freset = 1;
8660 /* Configure PCI I/O space. */
8661 ret = ha->isp_ops->iospace_config(ha);
8662 if (ret)
8663 goto probe_failed_ioconfig;
8665 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
8666 pdev->device, pdev->irq, ha->reg);
8668 qla4xxx_config_dma_addressing(ha);
8670 /* Initialize lists and spinlocks. */
8671 INIT_LIST_HEAD(&ha->free_srb_q);
8673 mutex_init(&ha->mbox_sem);
8674 mutex_init(&ha->chap_sem);
8675 init_completion(&ha->mbx_intr_comp);
8676 init_completion(&ha->disable_acb_comp);
8677 init_completion(&ha->idc_comp);
8678 init_completion(&ha->link_up_comp);
8680 spin_lock_init(&ha->hardware_lock);
8681 spin_lock_init(&ha->work_lock);
8683 /* Initialize work list */
8684 INIT_LIST_HEAD(&ha->work_list);
8686 /* Allocate dma buffers */
8687 if (qla4xxx_mem_alloc(ha)) {
8688 ql4_printk(KERN_WARNING, ha,
8689 "[ERROR] Failed to allocate memory for adapter\n");
8691 ret = -ENOMEM;
8692 goto probe_failed;
8695 host->cmd_per_lun = 3;
8696 host->max_channel = 0;
8697 host->max_lun = MAX_LUNS - 1;
8698 host->max_id = MAX_TARGETS;
8699 host->max_cmd_len = IOCB_MAX_CDB_LEN;
8700 host->can_queue = MAX_SRBS ;
8701 host->transportt = qla4xxx_scsi_transport;
8703 pci_set_drvdata(pdev, ha);
8705 ret = scsi_add_host(host, &pdev->dev);
8706 if (ret)
8707 goto probe_failed;
8709 if (is_qla80XX(ha))
8710 qla4_8xxx_get_flash_info(ha);
8712 if (is_qla8032(ha) || is_qla8042(ha)) {
8713 qla4_83xx_read_reset_template(ha);
8714 /*
8715 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
8716 * If DONTRESET_BIT0 is set, drivers should not set dev_state
8717 * to NEED_RESET. But if NEED_RESET is set, drivers should
8718 * honor the reset.
8719 */
8720 if (ql4xdontresethba == 1)
8721 qla4_83xx_set_idc_dontreset(ha);
8724 /*
8725 * Initialize the host adapter request/response queues and
8726 * firmware.
8727 * NOTE: interrupts are enabled upon successful completion.
8728 */
8729 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8731 /* Don't retry adapter initialization if IRQ allocation failed */
8732 if (is_qla80XX(ha) && (status == QLA_ERROR))
8733 goto skip_retry_init;
8735 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
8736 init_retry_count++ < MAX_INIT_RETRIES) {
8738 if (is_qla80XX(ha)) {
8739 ha->isp_ops->idc_lock(ha);
8740 dev_state = qla4_8xxx_rd_direct(ha,
8741 QLA8XXX_CRB_DEV_STATE);
8742 ha->isp_ops->idc_unlock(ha);
8743 if (dev_state == QLA8XXX_DEV_FAILED) {
8744 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
8745 "initializing adapter. H/W is in failed state\n",
8746 __func__);
8747 break;
8750 DEBUG2(printk("scsi: %s: retrying adapter initialization "
8751 "(%d)\n", __func__, init_retry_count));
8753 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
8754 continue;
8756 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8757 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
8758 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
8759 goto skip_retry_init;
8763 skip_retry_init:
8764 if (!test_bit(AF_ONLINE, &ha->flags)) {
8765 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
8767 if ((is_qla8022(ha) && ql4xdontresethba) ||
8768 ((is_qla8032(ha) || is_qla8042(ha)) &&
8769 qla4_83xx_idc_dontreset(ha))) {
8770 /* Put the device in failed state. */
8771 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
8772 ha->isp_ops->idc_lock(ha);
8773 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8774 QLA8XXX_DEV_FAILED);
8775 ha->isp_ops->idc_unlock(ha);
8777 ret = -ENODEV;
8778 goto remove_host;
8781 /* Startup the kernel thread for this host adapter. */
8782 DEBUG2(printk("scsi: %s: Starting kernel thread for "
8783 "qla4xxx_dpc\n", __func__));
8784 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
8785 ha->dpc_thread = create_singlethread_workqueue(buf);
8786 if (!ha->dpc_thread) {
8787 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
8788 ret = -ENODEV;
8789 goto remove_host;
8791 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
8793 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
8794 ha->host_no);
8795 if (!ha->task_wq) {
8796 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
8797 ret = -ENODEV;
8798 goto remove_host;
8801 /*
8802 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
8803 * (which is called indirectly by qla4xxx_initialize_adapter),
8804 * so that irqs will be registered after crbinit but before
8805 * mbx_intr_enable.
8806 */
8807 if (is_qla40XX(ha)) {
8808 ret = qla4xxx_request_irqs(ha);
8809 if (ret) {
8810 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
8811 "interrupt %d already in use.\n", pdev->irq);
8812 goto remove_host;
8816 pci_save_state(ha->pdev);
8817 ha->isp_ops->enable_intrs(ha);
8819 /* Start timer thread. */
8820 qla4xxx_start_timer(ha, 1);
8822 set_bit(AF_INIT_DONE, &ha->flags);
8824 qla4_8xxx_alloc_sysfs_attr(ha);
8826 printk(KERN_INFO
8827 " QLogic iSCSI HBA Driver version: %s\n"
8828 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
8829 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
8830 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
8831 ha->fw_info.fw_patch, ha->fw_info.fw_build);
8833 /* Set the driver version */
8834 if (is_qla80XX(ha))
8835 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
8837 if (qla4xxx_setup_boot_info(ha))
8838 ql4_printk(KERN_ERR, ha,
8839 "%s: No iSCSI boot target configured\n", __func__);
8841 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
8842 /* Perform the build ddb list and login to each */
8843 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
8844 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
8845 qla4xxx_wait_login_resp_boot_tgt(ha);
8847 qla4xxx_create_chap_list(ha);
8849 qla4xxx_create_ifaces(ha);
8850 return 0;
8852 remove_host:
8853 scsi_remove_host(ha->host);
8855 probe_failed:
8856 qla4xxx_free_adapter(ha);
8858 probe_failed_ioconfig:
8859 pci_disable_pcie_error_reporting(pdev);
8860 scsi_host_put(ha->host);
8862 probe_disable_device:
8863 pci_disable_device(pdev);
8865 return ret;
8869 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
8870 * @ha: pointer to adapter structure
8872 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
8873 * so that the other port will not re-initialize while in the process of
8874 * removing the ha due to driver unload or hba hotplug.
8876 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
8878 struct scsi_qla_host *other_ha = NULL;
8879 struct pci_dev *other_pdev = NULL;
8880 int fn = ISP4XXX_PCI_FN_2;
8882 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
8883 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
8884 fn = ISP4XXX_PCI_FN_1;
8886 other_pdev =
8887 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8888 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8889 fn));
8891 /* Get other_ha if other_pdev is valid and its state is enabled */
8892 if (other_pdev) {
8893 if (atomic_read(&other_pdev->enable_cnt)) {
8894 other_ha = pci_get_drvdata(other_pdev);
8895 if (other_ha) {
8896 set_bit(AF_HA_REMOVAL, &other_ha->flags);
8897 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
8898 "Prevent %s reinit\n", __func__,
8899 dev_name(&other_ha->pdev->dev)));
8902 pci_dev_put(other_pdev);
8906 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
8907 struct ddb_entry *ddb_entry)
8909 struct dev_db_entry *fw_ddb_entry = NULL;
8910 dma_addr_t fw_ddb_entry_dma;
8911 unsigned long wtime;
8912 uint32_t ddb_state;
8913 int options;
8914 int status;
8916 options = LOGOUT_OPTION_CLOSE_SESSION;
8917 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
8918 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
8919 goto clear_ddb;
8922 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8923 &fw_ddb_entry_dma, GFP_KERNEL);
8924 if (!fw_ddb_entry) {
8925 ql4_printk(KERN_ERR, ha,
8926 "%s: Unable to allocate dma buffer\n", __func__);
8927 goto clear_ddb;
8930 wtime = jiffies + (HZ * LOGOUT_TOV);
8931 do {
8932 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
8933 fw_ddb_entry, fw_ddb_entry_dma,
8934 NULL, NULL, &ddb_state, NULL,
8935 NULL, NULL);
8936 if (status == QLA_ERROR)
8937 goto free_ddb;
8939 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
8940 (ddb_state == DDB_DS_SESSION_FAILED))
8941 goto free_ddb;
8943 schedule_timeout_uninterruptible(HZ);
8944 } while ((time_after(wtime, jiffies)));
8946 free_ddb:
8947 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8948 fw_ddb_entry, fw_ddb_entry_dma);
8949 clear_ddb:
8950 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
8953 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
8955 struct ddb_entry *ddb_entry;
8956 int idx;
8958 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
8960 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8961 if ((ddb_entry != NULL) &&
8962 (ddb_entry->ddb_type == FLASH_DDB)) {
8964 qla4xxx_destroy_ddb(ha, ddb_entry);
8965 /*
8966 * We decremented the driver's reference count when we set up the
8967 * session so that driver unload is seamless without actually
8968 * destroying the session; take that reference back before tearing
8969 * the session down.
8970 */
8971 try_module_get(qla4xxx_iscsi_transport.owner);
8972 iscsi_destroy_endpoint(ddb_entry->conn->ep);
8973 qla4xxx_free_ddb(ha, ddb_entry);
8974 iscsi_session_teardown(ddb_entry->sess);
8979 * qla4xxx_remove_adapter - callback function to remove adapter.
8980 * @pci_dev: PCI device pointer
8982 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
8984 struct scsi_qla_host *ha;
8986 /*
8987 * If the PCI device is disabled then it means probe_adapter
8988 * failed and the resources were already cleaned up on probe_adapter exit.
8989 */
8990 if (!pci_is_enabled(pdev))
8991 return;
8993 ha = pci_get_drvdata(pdev);
8995 if (is_qla40XX(ha))
8996 qla4xxx_prevent_other_port_reinit(ha);
8998 /* destroy iface from sysfs */
8999 qla4xxx_destroy_ifaces(ha);
9001 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
9002 iscsi_boot_destroy_kset(ha->boot_kset);
9004 qla4xxx_destroy_fw_ddb_session(ha);
9005 qla4_8xxx_free_sysfs_attr(ha);
9007 qla4xxx_sysfs_ddb_remove(ha);
9008 scsi_remove_host(ha->host);
9010 qla4xxx_free_adapter(ha);
9012 scsi_host_put(ha->host);
9014 pci_disable_pcie_error_reporting(pdev);
9015 pci_disable_device(pdev);
9019 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
9020 * @ha: HA context
9022 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
9024 /* Update our PCI device dma_mask for full 64 bit mask */
9025 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
9026 dev_dbg(&ha->pdev->dev,
9027 "Failed to set 64 bit PCI consistent mask; "
9028 "using 32 bit.\n");
9029 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
9033 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9035 struct iscsi_cls_session *cls_sess;
9036 struct iscsi_session *sess;
9037 struct ddb_entry *ddb;
9038 int queue_depth = QL4_DEF_QDEPTH;
9040 cls_sess = starget_to_session(sdev->sdev_target);
9041 sess = cls_sess->dd_data;
9042 ddb = sess->dd_data;
9044 sdev->hostdata = ddb;
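/* Honor the ql4xmaxqdepth module parameter when it holds a sane non-zero
 * 16-bit value; otherwise fall back to the default queue depth.
 */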
9046 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9047 queue_depth = ql4xmaxqdepth;
9049 scsi_change_queue_depth(sdev, queue_depth);
9050 return 0;
9054 * qla4xxx_del_from_active_array - returns an active srb
9055 * @ha: Pointer to host adapter structure.
9056 * @index: index into the active_array
9058 * This routine removes and returns the srb at the specified index
9060 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
9061 uint32_t index)
9063 struct srb *srb = NULL;
9064 struct scsi_cmnd *cmd = NULL;
9066 cmd = scsi_host_find_tag(ha->host, index);
9067 if (!cmd)
9068 return srb;
9070 srb = (struct srb *)CMD_SP(cmd);
9071 if (!srb)
9072 return srb;
9074 /* update counters */
9075 if (srb->flags & SRB_DMA_VALID) {
9076 ha->iocb_cnt -= srb->iocb_cnt;
9077 if (srb->cmd)
9078 srb->cmd->host_scribble =
9079 (unsigned char *)(unsigned long) MAX_SRBS;
9081 return srb;
9085 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
9086 * @ha: Pointer to host adapter structure.
9087 * @cmd: Scsi Command to wait on.
9089 * This routine waits for the command to be returned by the Firmware
9090 * for some max time.
9092 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
9093 struct scsi_cmnd *cmd)
9095 int done = 0;
9096 struct srb *rp;
9097 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
9098 int ret = SUCCESS;
9100 /* Don't wait on the command if a PCI error is being handled
9101 * by the PCI AER driver.
9102 */
9103 if (unlikely(pci_channel_offline(ha->pdev)) ||
9104 (test_bit(AF_EEH_BUSY, &ha->flags))) {
9105 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
9106 ha->host_no, __func__);
9107 return ret;
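/* Poll every 2 seconds, for up to EH_WAIT_CMD_TOV iterations, until the
 * command's SP pointer is cleared, i.e. it has been returned to the OS.
 */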
9110 do {
9111 /* Check to see if it has been returned to the OS */
9112 rp = (struct srb *) CMD_SP(cmd);
9113 if (rp == NULL) {
9114 done++;
9115 break;
9118 msleep(2000);
9119 } while (max_wait_time--);
9121 return done;
9125 * qla4xxx_wait_for_hba_online - waits for HBA to come online
9126 * @ha: Pointer to host adapter structure
9128 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
9130 unsigned long wait_online;
9132 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
9133 while (time_before(jiffies, wait_online)) {
9135 if (adapter_up(ha))
9136 return QLA_SUCCESS;
9138 msleep(2000);
9141 return QLA_ERROR;
9145 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
9146 * @ha: pointer to HBA
9147 * @stgt: pointer to the SCSI target
9148 * @sdev: pointer to the SCSI device (NULL to wait on all devices of the target)
9150 * This function waits for all outstanding commands to a lun to complete. It
9151 * returns 0 if all pending commands are returned and 1 otherwise.
9153 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
9154 struct scsi_target *stgt,
9155 struct scsi_device *sdev)
9157 int cnt;
9158 int status = 0;
9159 struct scsi_cmnd *cmd;
9161 /*
9162 * Wait for all commands for the designated target or device
9163 * in the active array.
9164 */
9165 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
9166 cmd = scsi_host_find_tag(ha->host, cnt);
9167 if (cmd && stgt == scsi_target(cmd->device) &&
9168 (!sdev || sdev == cmd->device)) {
9169 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9170 status++;
9171 break;
9175 return status;
9179 * qla4xxx_eh_abort - callback for abort task.
9180 * @cmd: Pointer to Linux's SCSI command structure
9182 * This routine is called by the Linux OS to abort the specified
9183 * command.
9185 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9187 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9188 unsigned int id = cmd->device->id;
9189 uint64_t lun = cmd->device->lun;
9190 unsigned long flags;
9191 struct srb *srb = NULL;
9192 int ret = SUCCESS;
9193 int wait = 0;
9194 int rval;
9196 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9197 ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9199 rval = qla4xxx_isp_check_reg(ha);
9200 if (rval != QLA_SUCCESS) {
9201 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9202 return FAILED;
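/* Look up the SRB under the hardware lock so it cannot complete and be
 * freed underneath us, and hold an extra reference across the abort
 * mailbox command.
 */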
9205 spin_lock_irqsave(&ha->hardware_lock, flags);
9206 srb = (struct srb *) CMD_SP(cmd);
9207 if (!srb) {
9208 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9209 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
9210 ha->host_no, id, lun);
9211 return SUCCESS;
9213 kref_get(&srb->srb_ref);
9214 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9216 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
9217 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
9218 ha->host_no, id, lun));
9219 ret = FAILED;
9220 } else {
9221 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
9222 ha->host_no, id, lun));
9223 wait = 1;
9226 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
9228 /* Wait for command to complete */
9229 if (wait) {
9230 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9231 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
9232 ha->host_no, id, lun));
9233 ret = FAILED;
9237 ql4_printk(KERN_INFO, ha,
9238 "scsi%ld:%d:%llu: Abort command - %s\n",
9239 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
9241 return ret;
/**
 * qla4xxx_eh_device_reset - callback for device reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the LUN addressed by the
 * specified command.
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int ret = FAILED, stat;
	int rval;

	if (!ddb_entry)
		return ret;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;
	ret = FAILED;

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
		      cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	/* FIXME: wait for hba to go online */
	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
	if (stat != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
		goto eh_dev_reset_done;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 cmd->device)) {
		ql4_printk(KERN_INFO, ha,
			   "DEVICE RESET FAILED - waiting for "
			   "commands.\n");
		goto eh_dev_reset_done;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_LUN_RESET) != QLA_SUCCESS)
		goto eh_dev_reset_done;

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
		   ha->host_no, cmd->device->channel, cmd->device->id,
		   cmd->device->lun);

	ret = SUCCESS;

eh_dev_reset_done:

	return ret;
}
/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int stat, ret;
	int rval;

	if (!ddb_entry)
		return FAILED;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET ISSUED.\n");

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	stat = qla4xxx_reset_target(ha, ddb_entry);
	if (stat != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET RESET FAILED.\n");
		return FAILED;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 NULL)) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "waiting for commands.\n");
		return FAILED;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_TGT_WARM_RESET) != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "marker iocb failed.\n");
		return FAILED;
	}

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET SUCCEEDED.\n");
	return SUCCESS;
}
/**
 * qla4xxx_is_eh_active - check if error handler is running
 * @shost: Pointer to SCSI Host struct
 *
 * This routine determines whether the host reset was invoked from the
 * error-handler path or from an application such as sg_reset.
 **/
static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
{
	if (shost->shost_state == SHOST_RECOVERY)
		return 1;
	return 0;
}
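/*
 * Illustrative sketch (not part of the driver): one way a host reset reaches
 * the driver from outside the EH path is the SG_SCSI_RESET ioctl, which is
 * what utilities such as sg_reset use.  The device node below is a
 * hypothetical example; any sg node bound to this HBA would do.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int request_host_reset(void)
 *	{
 *		int val = SG_SCSI_RESET_HOST;	// routed to the host reset handler
 *		int fd = open("/dev/sg0", O_RDWR);	// hypothetical node
 *		int rc;
 *
 *		if (fd < 0)
 *			return -1;
 *		rc = ioctl(fd, SG_SCSI_RESET, &val);
 *		close(fd);
 *		return rc;
 *	}
 */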
/**
 * qla4xxx_eh_host_reset - kernel callback
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	int return_status = FAILED;
	struct scsi_qla_host *ha;
	int rval;

	ha = to_qla_host(cmd->device->host);

	rval = qla4xxx_isp_check_reg(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
		return FAILED;
	}

	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	/*
	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
	 * protocol drivers, we should not set device_state to NEED_RESET
	 */
	if (ql4xdontresethba ||
	    ((is_qla8032(ha) || is_qla8042(ha)) &&
	     qla4_83xx_idc_dontreset(ha))) {
		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
			      ha->host_no, __func__));

		/* Clear outstanding srb in queues */
		if (qla4xxx_is_eh_active(cmd->device->host))
			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);

		return FAILED;
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
			      "DEAD.\n", ha->host_no, cmd->device->channel,
			      __func__));

		return FAILED;
	}

	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		if (is_qla80XX(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}

	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
		return_status = SUCCESS;

	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
		   return_status == FAILED ? "FAILED" : "SUCCEEDED");

	return return_status;
}
static int qla4xxx_context_reset(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct addr_ctrl_blk_def *acb = NULL;
	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
	int rval = QLA_SUCCESS;
	dma_addr_t acb_dma;

	acb = dma_alloc_coherent(&ha->pdev->dev,
				 sizeof(struct addr_ctrl_blk_def),
				 &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
			   __func__);
		rval = -ENOMEM;
		goto exit_port_reset;
	}

	memset(acb, 0, acb_len);

	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

	rval = qla4xxx_disable_acb(ha);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

	wait_for_completion_timeout(&ha->disable_acb_comp,
				    DISABLE_ACB_TOV * HZ);

	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
	if (rval != QLA_SUCCESS) {
		rval = -EIO;
		goto exit_free_acb;
	}

exit_free_acb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
			  acb, acb_dma);
exit_port_reset:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
	return rval;
}
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = QLA_SUCCESS;
	uint32_t idc_ctrl;

	if (ql4xdontresethba) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
				  __func__));
		rval = -EPERM;
		goto exit_host_reset;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		goto recover_adapter;

	switch (reset_type) {
	case SCSI_ADAPTER_RESET:
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		break;
	case SCSI_FIRMWARE_RESET:
		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
			if (is_qla80XX(ha))
				/* set firmware context reset */
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else {
				rval = qla4xxx_context_reset(ha);
				goto exit_host_reset;
			}
		}
		break;
	}

recover_adapter:
	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
	 * reset is issued by application */
	if ((is_qla8032(ha) || is_qla8042(ha)) &&
	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl | GRACEFUL_RESET_BIT1));
	}

	rval = qla4xxx_recover_adapter(ha);
	if (rval != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
				  __func__));
		rval = -EIO;
	}

exit_host_reset:
	return rval;
}
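/*
 * Illustrative sketch (not part of the driver): qla4xxx_host_reset() is the
 * scsi_host_template ->host_reset() entry, which the SCSI midlayer invokes
 * when userspace writes "adapter" or "firmware" to the host's host_reset
 * sysfs attribute.  A minimal userspace trigger, assuming the HBA shows up
 * as host0 (a hypothetical host number), might look like this:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int request_firmware_reset(void)
 *	{
 *		int fd = open("/sys/class/scsi_host/host0/host_reset", O_WRONLY);
 *		int rc;
 *
 *		if (fd < 0)
 *			return -1;
 *		rc = write(fd, "firmware", strlen("firmware")) < 0 ? -1 : 0;
 *		close(fd);
 *		return rc;
 *	}
 */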
/* PCI AER driver recovers from all correctable errors w/o
 * driver intervention. For uncorrectable errors PCI AER
 * driver calls the following device driver's callbacks
 *
 * - Fatal Errors - link_reset
 * - Non-Fatal Errors - driver's error_detected() which
 *   returns CAN_RECOVER, NEED_RESET or DISCONNECT.
 *
 * PCI AER driver calls
 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
 *               returns RECOVERED or NEED_RESET if fw_hung
 * NEED_RESET - driver's slot_reset()
 * DISCONNECT - device is dead & cannot recover
 * RECOVERED - driver's resume()
 */
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
		   ha->host_no, __func__, state);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	switch (state) {
	case pci_channel_io_normal:
		clear_bit(AF_EEH_BUSY, &ha->flags);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		set_bit(AF_EEH_BUSY, &ha->flags);
		qla4xxx_mailbox_premature_completion(ha);
		qla4xxx_free_irqs(ha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(AF_EEH_BUSY, &ha->flags);
		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * qla4xxx_pci_mmio_enabled() gets called if
 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
 * and read/write to the device still works.
 **/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	return PCI_ERS_RESULT_RECOVERED;
}
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	fn = PCI_FUNC(ha->pdev->devfn);
	if (is_qla8022(ha)) {
		while (fn > 0) {
			fn--;
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
				   ha->host_no, __func__, fn);
			/* Get the pci device given the domain, bus,
			 * slot/function number */
			other_pdev = pci_get_domain_bus_and_slot(
					pci_domain_nr(ha->pdev->bus),
					ha->pdev->bus->number,
					PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
					fn));

			if (!other_pdev)
				continue;

			if (atomic_read(&other_pdev->enable_cnt)) {
				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
					   ha->host_no, __func__, fn);
				pci_dev_put(other_pdev);
				break;
			}
			pci_dev_put(other_pdev);
		}
	} else {
		/* this case is meant for ISP83xx/ISP84xx only */
		if (qla4_83xx_can_perform_reset(ha))
			/* reset fn as iSCSI is going to perform the reset */
			fn = 0;
	}

	/* The first function on the card, the reset owner will
	 * start & initialize the firmware. The other functions
	 * on the card will reset the firmware context
	 */
	if (!fn) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
			   "0x%x is the owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);

		ha->isp_ops->idc_lock(ha);
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_COLD);
		ha->isp_ops->idc_unlock(ha);

		rval = qla4_8xxx_update_idc_reg(ha);
		if (rval == QLA_ERROR) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
				   ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
			goto exit_error_recovery;
		}

		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "FAILED\n", ha->host_no, __func__);
			qla4xxx_free_irqs(ha);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
			ha->isp_ops->idc_unlock(ha);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "READY\n", ha->host_no, __func__);
			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
			ha->isp_ops->enable_intrs(ha);
		}
	} else {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
			   "the reset owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);
		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
		     QLA8XXX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
			if (rval == QLA_SUCCESS)
				ha->isp_ops->enable_intrs(ha);
			else
				qla4xxx_free_irqs(ha);

			ha->isp_ops->idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			ha->isp_ops->idc_unlock(ha);
		}
	}
exit_error_recovery:
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int rc;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
		   ha->host_no, __func__);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	/* Restore the saved state of PCIe device -
	 * BAR registers, PCI Config space, PCIX, MSI,
	 * IOV states
	 */
	pci_restore_state(pdev);

	/* pci_restore_state() clears the saved_state flag of the device,
	 * so save it again to re-set the flag
	 */
	pci_save_state(pdev);

	/* Initialize device or resume if in suspended state */
	rc = pci_enable_device(pdev);
	if (rc) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
			   "device after reset\n", ha->host_no, __func__);
		goto exit_slot_reset;
	}

	ha->isp_ops->disable_intrs(ha);

	if (is_qla80XX(ha)) {
		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

exit_slot_reset:
	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
		   ha->host_no, __func__, ret);
	return ret;
}
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int ret;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
		   ha->host_no, __func__);

	ret = qla4xxx_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
			   "resume I/O from slot/link_reset\n", ha->host_no,
			   __func__);
	}

	clear_bit(AF_EEH_BUSY, &ha->flags);
}
static const struct pci_error_handlers qla4xxx_err_handler = {
	.error_detected = qla4xxx_pci_error_detected,
	.mmio_enabled = qla4xxx_pci_mmio_enabled,
	.slot_reset = qla4xxx_pci_slot_reset,
	.resume = qla4xxx_pci_resume,
};
static struct pci_device_id qla4xxx_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8042,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
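/*
 * Note (sketch only, not a functional change): each entry above spells out the
 * four match fields explicitly with the sub-IDs wildcarded.  The same entry
 * could equivalently be written with the PCI_DEVICE() helper, which fills in
 * PCI_ANY_ID for the subvendor/subdevice fields, e.g.:
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8022) },
 */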
static struct pci_driver qla4xxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= qla4xxx_pci_tbl,
	.probe		= qla4xxx_probe_adapter,
	.remove		= qla4xxx_remove_adapter,
	.err_handler	= &qla4xxx_err_handler,
};
static int __init qla4xxx_module_init(void)
{
	int ret;

	if (ql4xqfulltracking)
		qla4xxx_driver_template.track_queue_depth = 1;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		       "%s: Unable to allocate SRB cache..."
		       "Failing load!\n", DRIVER_NAME);
		ret = -ENOMEM;
		goto no_srp_cache;
	}

	/* Derive version string. */
	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
	if (ql4xextended_error_logging)
		strcat(qla4xxx_version_str, "-debug");

	qla4xxx_scsi_transport =
		iscsi_register_transport(&qla4xxx_iscsi_transport);
	if (!qla4xxx_scsi_transport) {
		ret = -ENODEV;
		goto release_srb_cache;
	}

	ret = pci_register_driver(&qla4xxx_pci_driver);
	if (ret)
		goto unregister_transport;

	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
	return 0;

unregister_transport:
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
	kmem_cache_destroy(srb_cachep);
no_srp_cache:
	return ret;
}
static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);