scsi: qla4xxx: check return code of qla4xxx_copy_from_fwddb_param
linux/fpc-iii.git: drivers/scsi/qla4xxx/ql4_os.c
blob f9f899ec94270c2e09e732fba3e9b052d360bf5c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
23 /*
24 * Driver version
25 */
26 static char qla4xxx_version_str[40];
28 /*
29 * SRB allocation cache
30 */
31 static struct kmem_cache *srb_cachep;
33 /*
34 * Module parameter information and variables
35 */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
86 int ql4xmdcapmask = 0;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0 (firmware default capture mask)\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102 * SCSI host template entry points
103 */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
106 /*
107 * iSCSI template entry points
108 */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
153 int len);
154 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
156 /*
157 * SCSI host template entry points
158 */
159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164 static int qla4xxx_slave_alloc(struct scsi_device *device);
165 static umode_t qla4_attr_is_visible(int param_type, int param);
166 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
168 /*
169 * iSCSI Flash DDB sysfs entry points
170 */
171 static int
172 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
173 struct iscsi_bus_flash_conn *fnode_conn,
174 void *data, int len);
175 static int
176 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
177 int param, char *buf);
178 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
179 int len);
180 static int
181 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
182 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
183 struct iscsi_bus_flash_conn *fnode_conn);
184 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
185 struct iscsi_bus_flash_conn *fnode_conn);
186 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
189 QLA82XX_LEGACY_INTR_CONFIG;
191 static struct scsi_host_template qla4xxx_driver_template = {
192 .module = THIS_MODULE,
193 .name = DRIVER_NAME,
194 .proc_name = DRIVER_NAME,
195 .queuecommand = qla4xxx_queuecommand,
197 .eh_abort_handler = qla4xxx_eh_abort,
198 .eh_device_reset_handler = qla4xxx_eh_device_reset,
199 .eh_target_reset_handler = qla4xxx_eh_target_reset,
200 .eh_host_reset_handler = qla4xxx_eh_host_reset,
201 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
203 .slave_alloc = qla4xxx_slave_alloc,
204 .change_queue_depth = scsi_change_queue_depth,
206 .this_id = -1,
207 .cmd_per_lun = 3,
208 .use_clustering = ENABLE_CLUSTERING,
209 .sg_tablesize = SG_ALL,
211 .max_sectors = 0xFFFF,
212 .shost_attrs = qla4xxx_host_attrs,
213 .host_reset = qla4xxx_host_reset,
214 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
217 static struct iscsi_transport qla4xxx_iscsi_transport = {
218 .owner = THIS_MODULE,
219 .name = DRIVER_NAME,
220 .caps = CAP_TEXT_NEGO |
221 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
222 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
223 CAP_MULTI_R2T,
224 .attr_is_visible = qla4_attr_is_visible,
225 .create_session = qla4xxx_session_create,
226 .destroy_session = qla4xxx_session_destroy,
227 .start_conn = qla4xxx_conn_start,
228 .create_conn = qla4xxx_conn_create,
229 .bind_conn = qla4xxx_conn_bind,
230 .stop_conn = iscsi_conn_stop,
231 .destroy_conn = qla4xxx_conn_destroy,
232 .set_param = iscsi_set_param,
233 .get_conn_param = qla4xxx_conn_get_param,
234 .get_session_param = qla4xxx_session_get_param,
235 .get_ep_param = qla4xxx_get_ep_param,
236 .ep_connect = qla4xxx_ep_connect,
237 .ep_poll = qla4xxx_ep_poll,
238 .ep_disconnect = qla4xxx_ep_disconnect,
239 .get_stats = qla4xxx_conn_get_stats,
240 .send_pdu = iscsi_conn_send_pdu,
241 .xmit_task = qla4xxx_task_xmit,
242 .cleanup_task = qla4xxx_task_cleanup,
243 .alloc_pdu = qla4xxx_alloc_pdu,
245 .get_host_param = qla4xxx_host_get_param,
246 .set_iface_param = qla4xxx_iface_set_param,
247 .get_iface_param = qla4xxx_get_iface_param,
248 .bsg_request = qla4xxx_bsg_request,
249 .send_ping = qla4xxx_send_ping,
250 .get_chap = qla4xxx_get_chap_list,
251 .delete_chap = qla4xxx_delete_chap,
252 .set_chap = qla4xxx_set_chap_entry,
253 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
254 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
255 .new_flashnode = qla4xxx_sysfs_ddb_add,
256 .del_flashnode = qla4xxx_sysfs_ddb_delete,
257 .login_flashnode = qla4xxx_sysfs_ddb_login,
258 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
259 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
260 .get_host_stats = qla4xxx_get_host_stats,
263 static struct scsi_transport_template *qla4xxx_scsi_transport;
265 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
267 u32 reg_val = 0;
268 int rval = QLA_SUCCESS;
270 if (is_qla8022(ha))
271 reg_val = readl(&ha->qla4_82xx_reg->host_status);
272 else if (is_qla8032(ha) || is_qla8042(ha))
273 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
274 else
275 reg_val = readw(&ha->reg->ctrl_status);
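276 /* A read of QL4_ISP_REG_DISCONNECT means the register access did not reach the adapter, so report QLA_ERROR. */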
277 if (reg_val == QL4_ISP_REG_DISCONNECT)
278 rval = QLA_ERROR;
280 return rval;
283 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
284 uint32_t iface_type, uint32_t payload_size,
285 uint32_t pid, struct sockaddr *dst_addr)
287 struct scsi_qla_host *ha = to_qla_host(shost);
288 struct sockaddr_in *addr;
289 struct sockaddr_in6 *addr6;
290 uint32_t options = 0;
291 uint8_t ipaddr[IPv6_ADDR_LEN];
292 int rval;
294 memset(ipaddr, 0, IPv6_ADDR_LEN);
295 /* IPv4 to IPv4 */
296 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
297 (dst_addr->sa_family == AF_INET)) {
298 addr = (struct sockaddr_in *)dst_addr;
299 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
300 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
301 "dest: %pI4\n", __func__,
302 &ha->ip_config.ip_address, ipaddr));
303 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
304 ipaddr);
305 if (rval)
306 rval = -EINVAL;
307 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
308 (dst_addr->sa_family == AF_INET6)) {
309 /* IPv6 to IPv6 */
310 addr6 = (struct sockaddr_in6 *)dst_addr;
311 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
313 options |= PING_IPV6_PROTOCOL_ENABLE;
315 /* Ping using LinkLocal address */
316 if ((iface_num == 0) || (iface_num == 1)) {
317 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
318 "src: %pI6 dest: %pI6\n", __func__,
319 &ha->ip_config.ipv6_link_local_addr,
320 ipaddr));
321 options |= PING_IPV6_LINKLOCAL_ADDR;
322 rval = qla4xxx_ping_iocb(ha, options, payload_size,
323 pid, ipaddr);
324 } else {
325 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
326 "not supported\n", __func__, iface_num);
327 rval = -ENOSYS;
328 goto exit_send_ping;
331 /*
332 * If the ping using the LinkLocal address fails, try the ping
333 * using the IPv6 address.
334 */
335 if (rval != QLA_SUCCESS) {
336 options &= ~PING_IPV6_LINKLOCAL_ADDR;
337 if (iface_num == 0) {
338 options |= PING_IPV6_ADDR0;
339 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
340 "Ping src: %pI6 "
341 "dest: %pI6\n", __func__,
342 &ha->ip_config.ipv6_addr0,
343 ipaddr));
344 } else if (iface_num == 1) {
345 options |= PING_IPV6_ADDR1;
346 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
347 "Ping src: %pI6 "
348 "dest: %pI6\n", __func__,
349 &ha->ip_config.ipv6_addr1,
350 ipaddr));
352 rval = qla4xxx_ping_iocb(ha, options, payload_size,
353 pid, ipaddr);
354 if (rval)
355 rval = -EINVAL;
357 } else
358 rval = -ENOSYS;
359 exit_send_ping:
360 return rval;
363 static umode_t qla4_attr_is_visible(int param_type, int param)
365 switch (param_type) {
366 case ISCSI_HOST_PARAM:
367 switch (param) {
368 case ISCSI_HOST_PARAM_HWADDRESS:
369 case ISCSI_HOST_PARAM_IPADDRESS:
370 case ISCSI_HOST_PARAM_INITIATOR_NAME:
371 case ISCSI_HOST_PARAM_PORT_STATE:
372 case ISCSI_HOST_PARAM_PORT_SPEED:
373 return S_IRUGO;
374 default:
375 return 0;
377 case ISCSI_PARAM:
378 switch (param) {
379 case ISCSI_PARAM_PERSISTENT_ADDRESS:
380 case ISCSI_PARAM_PERSISTENT_PORT:
381 case ISCSI_PARAM_CONN_ADDRESS:
382 case ISCSI_PARAM_CONN_PORT:
383 case ISCSI_PARAM_TARGET_NAME:
384 case ISCSI_PARAM_TPGT:
385 case ISCSI_PARAM_TARGET_ALIAS:
386 case ISCSI_PARAM_MAX_BURST:
387 case ISCSI_PARAM_MAX_R2T:
388 case ISCSI_PARAM_FIRST_BURST:
389 case ISCSI_PARAM_MAX_RECV_DLENGTH:
390 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
391 case ISCSI_PARAM_IFACE_NAME:
392 case ISCSI_PARAM_CHAP_OUT_IDX:
393 case ISCSI_PARAM_CHAP_IN_IDX:
394 case ISCSI_PARAM_USERNAME:
395 case ISCSI_PARAM_PASSWORD:
396 case ISCSI_PARAM_USERNAME_IN:
397 case ISCSI_PARAM_PASSWORD_IN:
398 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
399 case ISCSI_PARAM_DISCOVERY_SESS:
400 case ISCSI_PARAM_PORTAL_TYPE:
401 case ISCSI_PARAM_CHAP_AUTH_EN:
402 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
403 case ISCSI_PARAM_BIDI_CHAP_EN:
404 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
405 case ISCSI_PARAM_DEF_TIME2WAIT:
406 case ISCSI_PARAM_DEF_TIME2RETAIN:
407 case ISCSI_PARAM_HDRDGST_EN:
408 case ISCSI_PARAM_DATADGST_EN:
409 case ISCSI_PARAM_INITIAL_R2T_EN:
410 case ISCSI_PARAM_IMM_DATA_EN:
411 case ISCSI_PARAM_PDU_INORDER_EN:
412 case ISCSI_PARAM_DATASEQ_INORDER_EN:
413 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
414 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
415 case ISCSI_PARAM_TCP_WSF_DISABLE:
416 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
417 case ISCSI_PARAM_TCP_TIMER_SCALE:
418 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
419 case ISCSI_PARAM_TCP_XMIT_WSF:
420 case ISCSI_PARAM_TCP_RECV_WSF:
421 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
422 case ISCSI_PARAM_IPV4_TOS:
423 case ISCSI_PARAM_IPV6_TC:
424 case ISCSI_PARAM_IPV6_FLOW_LABEL:
425 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
426 case ISCSI_PARAM_KEEPALIVE_TMO:
427 case ISCSI_PARAM_LOCAL_PORT:
428 case ISCSI_PARAM_ISID:
429 case ISCSI_PARAM_TSID:
430 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
431 case ISCSI_PARAM_ERL:
432 case ISCSI_PARAM_STATSN:
433 case ISCSI_PARAM_EXP_STATSN:
434 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
435 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
436 case ISCSI_PARAM_LOCAL_IPADDR:
437 return S_IRUGO;
438 default:
439 return 0;
441 case ISCSI_NET_PARAM:
442 switch (param) {
443 case ISCSI_NET_PARAM_IPV4_ADDR:
444 case ISCSI_NET_PARAM_IPV4_SUBNET:
445 case ISCSI_NET_PARAM_IPV4_GW:
446 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
447 case ISCSI_NET_PARAM_IFACE_ENABLE:
448 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
449 case ISCSI_NET_PARAM_IPV6_ADDR:
450 case ISCSI_NET_PARAM_IPV6_ROUTER:
451 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
452 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
453 case ISCSI_NET_PARAM_VLAN_ID:
454 case ISCSI_NET_PARAM_VLAN_PRIORITY:
455 case ISCSI_NET_PARAM_VLAN_ENABLED:
456 case ISCSI_NET_PARAM_MTU:
457 case ISCSI_NET_PARAM_PORT:
458 case ISCSI_NET_PARAM_IPADDR_STATE:
459 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
460 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
461 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
462 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
463 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
464 case ISCSI_NET_PARAM_TCP_WSF:
465 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
466 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
467 case ISCSI_NET_PARAM_CACHE_ID:
468 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
469 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
470 case ISCSI_NET_PARAM_IPV4_TOS_EN:
471 case ISCSI_NET_PARAM_IPV4_TOS:
472 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
473 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
474 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
475 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
476 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
477 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
478 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
479 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
480 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
481 case ISCSI_NET_PARAM_REDIRECT_EN:
482 case ISCSI_NET_PARAM_IPV4_TTL:
483 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
484 case ISCSI_NET_PARAM_IPV6_MLD_EN:
485 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
486 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
487 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
488 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
489 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
490 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
491 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
492 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
493 return S_IRUGO;
494 default:
495 return 0;
497 case ISCSI_IFACE_PARAM:
498 switch (param) {
499 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
500 case ISCSI_IFACE_PARAM_HDRDGST_EN:
501 case ISCSI_IFACE_PARAM_DATADGST_EN:
502 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
503 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
504 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
505 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
506 case ISCSI_IFACE_PARAM_ERL:
507 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
508 case ISCSI_IFACE_PARAM_FIRST_BURST:
509 case ISCSI_IFACE_PARAM_MAX_R2T:
510 case ISCSI_IFACE_PARAM_MAX_BURST:
511 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
512 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
513 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
514 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
515 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
516 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
517 return S_IRUGO;
518 default:
519 return 0;
521 case ISCSI_FLASHNODE_PARAM:
522 switch (param) {
523 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
524 case ISCSI_FLASHNODE_PORTAL_TYPE:
525 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
526 case ISCSI_FLASHNODE_DISCOVERY_SESS:
527 case ISCSI_FLASHNODE_ENTRY_EN:
528 case ISCSI_FLASHNODE_HDR_DGST_EN:
529 case ISCSI_FLASHNODE_DATA_DGST_EN:
530 case ISCSI_FLASHNODE_IMM_DATA_EN:
531 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
532 case ISCSI_FLASHNODE_DATASEQ_INORDER:
533 case ISCSI_FLASHNODE_PDU_INORDER:
534 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
535 case ISCSI_FLASHNODE_SNACK_REQ_EN:
536 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
537 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
538 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
539 case ISCSI_FLASHNODE_ERL:
540 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
541 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
542 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
543 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
544 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
545 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
546 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
547 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
548 case ISCSI_FLASHNODE_FIRST_BURST:
549 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
550 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
551 case ISCSI_FLASHNODE_MAX_R2T:
552 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
553 case ISCSI_FLASHNODE_ISID:
554 case ISCSI_FLASHNODE_TSID:
555 case ISCSI_FLASHNODE_PORT:
556 case ISCSI_FLASHNODE_MAX_BURST:
557 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
558 case ISCSI_FLASHNODE_IPADDR:
559 case ISCSI_FLASHNODE_ALIAS:
560 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
561 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
562 case ISCSI_FLASHNODE_LOCAL_PORT:
563 case ISCSI_FLASHNODE_IPV4_TOS:
564 case ISCSI_FLASHNODE_IPV6_TC:
565 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
566 case ISCSI_FLASHNODE_NAME:
567 case ISCSI_FLASHNODE_TPGT:
568 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
569 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
570 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
571 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
572 case ISCSI_FLASHNODE_TCP_RECV_WSF:
573 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
574 case ISCSI_FLASHNODE_USERNAME:
575 case ISCSI_FLASHNODE_PASSWORD:
576 case ISCSI_FLASHNODE_STATSN:
577 case ISCSI_FLASHNODE_EXP_STATSN:
578 case ISCSI_FLASHNODE_IS_BOOT_TGT:
579 return S_IRUGO;
580 default:
581 return 0;
585 return 0;
588 /**
589 * qla4xxx_create_chap_list - Create CHAP list from FLASH
590 * @ha: pointer to adapter structure
591 *
592 * Read flash and make a list of CHAP entries. During login, when a CHAP
593 * entry is received, it is checked against this list. If the entry exists,
594 * its CHAP index is set in the DDB. If the CHAP entry does not exist in
595 * this list, a new entry is added to the CHAP table in FLASH and the index
596 * obtained is used in the DDB.
597 **/
598 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
600 int rval = 0;
601 uint8_t *chap_flash_data = NULL;
602 uint32_t offset;
603 dma_addr_t chap_dma;
604 uint32_t chap_size = 0;
606 if (is_qla40XX(ha))
607 chap_size = MAX_CHAP_ENTRIES_40XX *
608 sizeof(struct ql4_chap_table);
609 else /* Single region contains CHAP info for both
610 * ports, which is divided in half for each port.
611 */
612 chap_size = ha->hw.flt_chap_size / 2;
614 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
615 &chap_dma, GFP_KERNEL);
616 if (!chap_flash_data) {
617 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
618 return;
621 if (is_qla40XX(ha)) {
622 offset = FLASH_CHAP_OFFSET;
623 } else {
624 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
625 if (ha->port_num == 1)
626 offset += chap_size;
629 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
630 if (rval != QLA_SUCCESS)
631 goto exit_chap_list;
633 if (ha->chap_list == NULL)
634 ha->chap_list = vmalloc(chap_size);
635 if (ha->chap_list == NULL) {
636 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
637 goto exit_chap_list;
640 memset(ha->chap_list, 0, chap_size);
641 memcpy(ha->chap_list, chap_flash_data, chap_size);
643 exit_chap_list:
644 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
647 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
648 int16_t chap_index,
649 struct ql4_chap_table **chap_entry)
651 int rval = QLA_ERROR;
652 int max_chap_entries;
654 if (!ha->chap_list) {
655 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
656 rval = QLA_ERROR;
657 goto exit_get_chap;
660 if (is_qla80XX(ha))
661 max_chap_entries = (ha->hw.flt_chap_size / 2) /
662 sizeof(struct ql4_chap_table);
663 else
664 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
666 if (chap_index > max_chap_entries) {
667 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
668 rval = QLA_ERROR;
669 goto exit_get_chap;
672 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
673 if ((*chap_entry)->cookie !=
674 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
675 rval = QLA_ERROR;
676 *chap_entry = NULL;
677 } else {
678 rval = QLA_SUCCESS;
681 exit_get_chap:
682 return rval;
685 /**
686 * qla4xxx_find_free_chap_index - Find the first free chap index
687 * @ha: pointer to adapter structure
688 * @chap_index: CHAP index to be returned
689 *
690 * Find the first free chap index available in the chap table
691 *
692 * Note: Caller should acquire the chap lock before getting here.
693 **/
694 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
695 uint16_t *chap_index)
697 int i, rval;
698 int free_index = -1;
699 int max_chap_entries = 0;
700 struct ql4_chap_table *chap_table;
702 if (is_qla80XX(ha))
703 max_chap_entries = (ha->hw.flt_chap_size / 2) /
704 sizeof(struct ql4_chap_table);
705 else
706 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
708 if (!ha->chap_list) {
709 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
710 rval = QLA_ERROR;
711 goto exit_find_chap;
714 for (i = 0; i < max_chap_entries; i++) {
715 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
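716 /* An index is free when it lies beyond the reserved range and does not hold a valid cookie. */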
717 if ((chap_table->cookie !=
718 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
719 (i > MAX_RESRV_CHAP_IDX)) {
720 free_index = i;
721 break;
725 if (free_index != -1) {
726 *chap_index = free_index;
727 rval = QLA_SUCCESS;
728 } else {
729 rval = QLA_ERROR;
732 exit_find_chap:
733 return rval;
736 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
737 uint32_t *num_entries, char *buf)
739 struct scsi_qla_host *ha = to_qla_host(shost);
740 struct ql4_chap_table *chap_table;
741 struct iscsi_chap_rec *chap_rec;
742 int max_chap_entries = 0;
743 int valid_chap_entries = 0;
744 int ret = 0, i;
746 if (is_qla80XX(ha))
747 max_chap_entries = (ha->hw.flt_chap_size / 2) /
748 sizeof(struct ql4_chap_table);
749 else
750 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
752 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
753 __func__, *num_entries, chap_tbl_idx);
755 if (!buf) {
756 ret = -ENOMEM;
757 goto exit_get_chap_list;
760 qla4xxx_create_chap_list(ha);
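761 /* Refresh the ha->chap_list cache from FLASH before scanning it for valid entries. */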
762 chap_rec = (struct iscsi_chap_rec *) buf;
763 mutex_lock(&ha->chap_sem);
764 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
765 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
766 if (chap_table->cookie !=
767 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
768 continue;
770 chap_rec->chap_tbl_idx = i;
771 strlcpy(chap_rec->username, chap_table->name,
772 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
773 strlcpy(chap_rec->password, chap_table->secret,
774 QL4_CHAP_MAX_SECRET_LEN);
775 chap_rec->password_length = chap_table->secret_len;
777 if (chap_table->flags & BIT_7) /* local */
778 chap_rec->chap_type = CHAP_TYPE_OUT;
780 if (chap_table->flags & BIT_6) /* peer */
781 chap_rec->chap_type = CHAP_TYPE_IN;
783 chap_rec++;
785 valid_chap_entries++;
786 if (valid_chap_entries == *num_entries)
787 break;
788 else
789 continue;
791 mutex_unlock(&ha->chap_sem);
793 exit_get_chap_list:
794 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
795 __func__, valid_chap_entries);
796 *num_entries = valid_chap_entries;
797 return ret;
800 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
802 int ret = 0;
803 uint16_t *chap_tbl_idx = (uint16_t *) data;
804 struct iscsi_cls_session *cls_session;
805 struct iscsi_session *sess;
806 struct ddb_entry *ddb_entry;
808 if (!iscsi_is_session_dev(dev))
809 goto exit_is_chap_active;
811 cls_session = iscsi_dev_to_session(dev);
812 sess = cls_session->dd_data;
813 ddb_entry = sess->dd_data;
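814 /* iscsi_session_chkready() is non-zero for sessions that are not logged in; skip those. */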
815 if (iscsi_session_chkready(cls_session))
816 goto exit_is_chap_active;
818 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
819 ret = 1;
821 exit_is_chap_active:
822 return ret;
825 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
826 uint16_t chap_tbl_idx)
828 int ret = 0;
830 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
831 __qla4xxx_is_chap_active);
833 return ret;
836 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
838 struct scsi_qla_host *ha = to_qla_host(shost);
839 struct ql4_chap_table *chap_table;
840 dma_addr_t chap_dma;
841 int max_chap_entries = 0;
842 uint32_t offset = 0;
843 uint32_t chap_size;
844 int ret = 0;
846 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
847 if (chap_table == NULL)
848 return -ENOMEM;
850 memset(chap_table, 0, sizeof(struct ql4_chap_table));
852 if (is_qla80XX(ha))
853 max_chap_entries = (ha->hw.flt_chap_size / 2) /
854 sizeof(struct ql4_chap_table);
855 else
856 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
858 if (chap_tbl_idx > max_chap_entries) {
859 ret = -EINVAL;
860 goto exit_delete_chap;
863 /* Check if the chap index is in use.
864 * If it is in use, don't delete the chap entry. */
865 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
866 if (ret) {
867 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
868 "delete from flash\n", chap_tbl_idx);
869 ret = -EBUSY;
870 goto exit_delete_chap;
873 chap_size = sizeof(struct ql4_chap_table);
874 if (is_qla40XX(ha))
875 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
876 else {
877 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
878 /* flt_chap_size is the CHAP table size for both ports,
879 * so divide it by 2 to calculate the offset for the second port.
880 */
881 if (ha->port_num == 1)
882 offset += (ha->hw.flt_chap_size / 2);
883 offset += (chap_tbl_idx * chap_size);
886 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
887 if (ret != QLA_SUCCESS) {
888 ret = -EINVAL;
889 goto exit_delete_chap;
892 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
893 __le16_to_cpu(chap_table->cookie)));
895 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
896 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
897 goto exit_delete_chap;
900 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
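901 /* Invalidate the cookie and write the entry back to FLASH to delete it. */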
902 offset = FLASH_CHAP_OFFSET |
903 (chap_tbl_idx * sizeof(struct ql4_chap_table));
904 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
905 FLASH_OPT_RMW_COMMIT);
906 if (ret == QLA_SUCCESS && ha->chap_list) {
907 mutex_lock(&ha->chap_sem);
908 /* Update ha chap_list cache */
909 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
910 chap_table, sizeof(struct ql4_chap_table));
911 mutex_unlock(&ha->chap_sem);
913 if (ret != QLA_SUCCESS)
914 ret = -EINVAL;
916 exit_delete_chap:
917 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
918 return ret;
921 /**
922 * qla4xxx_set_chap_entry - Make chap entry with given information
923 * @shost: pointer to host
924 * @data: chap info - credentials, index and type to make chap entry
925 * @len: length of data
926 *
927 * Add or update chap entry with the given information
928 **/
929 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
931 struct scsi_qla_host *ha = to_qla_host(shost);
932 struct iscsi_chap_rec chap_rec;
933 struct ql4_chap_table *chap_entry = NULL;
934 struct iscsi_param_info *param_info;
935 struct nlattr *attr;
936 int max_chap_entries = 0;
937 int type;
938 int rem = len;
939 int rc = 0;
940 int size;
942 memset(&chap_rec, 0, sizeof(chap_rec));
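943 /* Unpack the netlink attributes supplied by userspace into chap_rec. */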
944 nla_for_each_attr(attr, data, len, rem) {
945 param_info = nla_data(attr);
947 switch (param_info->param) {
948 case ISCSI_CHAP_PARAM_INDEX:
949 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
950 break;
951 case ISCSI_CHAP_PARAM_CHAP_TYPE:
952 chap_rec.chap_type = param_info->value[0];
953 break;
954 case ISCSI_CHAP_PARAM_USERNAME:
955 size = min_t(size_t, sizeof(chap_rec.username),
956 param_info->len);
957 memcpy(chap_rec.username, param_info->value, size);
958 break;
959 case ISCSI_CHAP_PARAM_PASSWORD:
960 size = min_t(size_t, sizeof(chap_rec.password),
961 param_info->len);
962 memcpy(chap_rec.password, param_info->value, size);
963 break;
964 case ISCSI_CHAP_PARAM_PASSWORD_LEN:
965 chap_rec.password_length = param_info->value[0];
966 break;
967 default:
968 ql4_printk(KERN_ERR, ha,
969 "%s: No such sysfs attribute\n", __func__);
970 rc = -ENOSYS;
971 goto exit_set_chap;
975 if (chap_rec.chap_type == CHAP_TYPE_IN)
976 type = BIDI_CHAP;
977 else
978 type = LOCAL_CHAP;
980 if (is_qla80XX(ha))
981 max_chap_entries = (ha->hw.flt_chap_size / 2) /
982 sizeof(struct ql4_chap_table);
983 else
984 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
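985 /* An in-range index updates an existing entry; otherwise a free index is allocated below. */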
986 mutex_lock(&ha->chap_sem);
987 if (chap_rec.chap_tbl_idx < max_chap_entries) {
988 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
989 &chap_entry);
990 if (!rc) {
991 if (!(type == qla4xxx_get_chap_type(chap_entry))) {
992 ql4_printk(KERN_INFO, ha,
993 "Type mismatch for CHAP entry %d\n",
994 chap_rec.chap_tbl_idx);
995 rc = -EINVAL;
996 goto exit_unlock_chap;
999 /* If chap index is in use then don't modify it */
1000 rc = qla4xxx_is_chap_active(shost,
1001 chap_rec.chap_tbl_idx);
1002 if (rc) {
1003 ql4_printk(KERN_INFO, ha,
1004 "CHAP entry %d is in use\n",
1005 chap_rec.chap_tbl_idx);
1006 rc = -EBUSY;
1007 goto exit_unlock_chap;
1010 } else {
1011 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
1012 if (rc) {
1013 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
1014 rc = -EBUSY;
1015 goto exit_unlock_chap;
1019 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
1020 chap_rec.chap_tbl_idx, type);
1022 exit_unlock_chap:
1023 mutex_unlock(&ha->chap_sem);
1025 exit_set_chap:
1026 return rc;
1030 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
1032 struct scsi_qla_host *ha = to_qla_host(shost);
1033 struct iscsi_offload_host_stats *host_stats = NULL;
1034 int host_stats_size;
1035 int ret = 0;
1036 int ddb_idx = 0;
1037 struct ql_iscsi_stats *ql_iscsi_stats = NULL;
1038 int stats_size;
1039 dma_addr_t iscsi_stats_dma;
1041 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
1043 host_stats_size = sizeof(struct iscsi_offload_host_stats);
1045 if (host_stats_size != len) {
1046 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
1047 __func__, len, host_stats_size);
1048 ret = -EINVAL;
1049 goto exit_host_stats;
1051 host_stats = (struct iscsi_offload_host_stats *)buf;
1053 if (!buf) {
1054 ret = -ENOMEM;
1055 goto exit_host_stats;
1058 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
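1059 /* DMA-coherent buffer that the firmware fills with its iSCSI statistics block. */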
1060 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1061 &iscsi_stats_dma, GFP_KERNEL);
1062 if (!ql_iscsi_stats) {
1063 ql4_printk(KERN_ERR, ha,
1064 "Unable to allocate memory for iscsi stats\n");
1065 ret = -ENOMEM;
1066 goto exit_host_stats;
1069 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
1070 iscsi_stats_dma);
1071 if (ret != QLA_SUCCESS) {
1072 ql4_printk(KERN_ERR, ha,
1073 "Unable to retrieve iscsi stats\n");
1074 ret = -EIO;
1075 goto exit_host_stats;
1077 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
1078 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
1079 host_stats->mactx_multicast_frames =
1080 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
1081 host_stats->mactx_broadcast_frames =
1082 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
1083 host_stats->mactx_pause_frames =
1084 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
1085 host_stats->mactx_control_frames =
1086 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
1087 host_stats->mactx_deferral =
1088 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
1089 host_stats->mactx_excess_deferral =
1090 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
1091 host_stats->mactx_late_collision =
1092 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
1093 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
1094 host_stats->mactx_single_collision =
1095 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
1096 host_stats->mactx_multiple_collision =
1097 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
1098 host_stats->mactx_collision =
1099 le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
1100 host_stats->mactx_frames_dropped =
1101 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
1102 host_stats->mactx_jumbo_frames =
1103 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
1104 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
1105 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
1106 host_stats->macrx_unknown_control_frames =
1107 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
1108 host_stats->macrx_pause_frames =
1109 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
1110 host_stats->macrx_control_frames =
1111 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
1112 host_stats->macrx_dribble =
1113 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
1114 host_stats->macrx_frame_length_error =
1115 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
1116 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
1117 host_stats->macrx_carrier_sense_error =
1118 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
1119 host_stats->macrx_frame_discarded =
1120 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
1121 host_stats->macrx_frames_dropped =
1122 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
1123 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
1124 host_stats->mac_encoding_error =
1125 le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
1126 host_stats->macrx_length_error_large =
1127 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
1128 host_stats->macrx_length_error_small =
1129 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
1130 host_stats->macrx_multicast_frames =
1131 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
1132 host_stats->macrx_broadcast_frames =
1133 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
1134 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
1135 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
1136 host_stats->iptx_fragments =
1137 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
1138 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
1139 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
1140 host_stats->iprx_fragments =
1141 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
1142 host_stats->ip_datagram_reassembly =
1143 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
1144 host_stats->ip_invalid_address_error =
1145 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
1146 host_stats->ip_error_packets =
1147 le64_to_cpu(ql_iscsi_stats->ip_error_packets);
1148 host_stats->ip_fragrx_overlap =
1149 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
1150 host_stats->ip_fragrx_outoforder =
1151 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
1152 host_stats->ip_datagram_reassembly_timeout =
1153 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
1154 host_stats->ipv6tx_packets =
1155 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
1156 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
1157 host_stats->ipv6tx_fragments =
1158 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
1159 host_stats->ipv6rx_packets =
1160 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
1161 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
1162 host_stats->ipv6rx_fragments =
1163 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
1164 host_stats->ipv6_datagram_reassembly =
1165 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
1166 host_stats->ipv6_invalid_address_error =
1167 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
1168 host_stats->ipv6_error_packets =
1169 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
1170 host_stats->ipv6_fragrx_overlap =
1171 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
1172 host_stats->ipv6_fragrx_outoforder =
1173 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
1174 host_stats->ipv6_datagram_reassembly_timeout =
1175 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
1176 host_stats->tcptx_segments =
1177 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
1178 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
1179 host_stats->tcprx_segments =
1180 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
1181 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
1182 host_stats->tcp_duplicate_ack_retx =
1183 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
1184 host_stats->tcp_retx_timer_expired =
1185 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
1186 host_stats->tcprx_duplicate_ack =
1187 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
1188 host_stats->tcprx_pure_ackr =
1189 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
1190 host_stats->tcptx_delayed_ack =
1191 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
1192 host_stats->tcptx_pure_ack =
1193 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
1194 host_stats->tcprx_segment_error =
1195 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
1196 host_stats->tcprx_segment_outoforder =
1197 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
1198 host_stats->tcprx_window_probe =
1199 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
1200 host_stats->tcprx_window_update =
1201 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
1202 host_stats->tcptx_window_probe_persist =
1203 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
1204 host_stats->ecc_error_correction =
1205 le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
1206 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
1207 host_stats->iscsi_data_bytes_tx =
1208 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
1209 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
1210 host_stats->iscsi_data_bytes_rx =
1211 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
1212 host_stats->iscsi_io_completed =
1213 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
1214 host_stats->iscsi_unexpected_io_rx =
1215 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
1216 host_stats->iscsi_format_error =
1217 le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
1218 host_stats->iscsi_hdr_digest_error =
1219 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
1220 host_stats->iscsi_data_digest_error =
1221 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
1222 host_stats->iscsi_sequence_error =
1223 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
1224 exit_host_stats:
1225 if (ql_iscsi_stats)
1226 dma_free_coherent(&ha->pdev->dev, stats_size,
1227 ql_iscsi_stats, iscsi_stats_dma);
1229 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
1230 __func__);
1231 return ret;
1234 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
1235 enum iscsi_param_type param_type,
1236 int param, char *buf)
1238 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
1239 struct scsi_qla_host *ha = to_qla_host(shost);
1240 int ival;
1241 char *pval = NULL;
1242 int len = -ENOSYS;
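1243 /* Network and iface parameters are handled separately; unknown parameters return -ENOSYS. */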
1244 if (param_type == ISCSI_NET_PARAM) {
1245 switch (param) {
1246 case ISCSI_NET_PARAM_IPV4_ADDR:
1247 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1248 break;
1249 case ISCSI_NET_PARAM_IPV4_SUBNET:
1250 len = sprintf(buf, "%pI4\n",
1251 &ha->ip_config.subnet_mask);
1252 break;
1253 case ISCSI_NET_PARAM_IPV4_GW:
1254 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
1255 break;
1256 case ISCSI_NET_PARAM_IFACE_ENABLE:
1257 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1258 OP_STATE(ha->ip_config.ipv4_options,
1259 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
1260 } else {
1261 OP_STATE(ha->ip_config.ipv6_options,
1262 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
1265 len = sprintf(buf, "%s\n", pval);
1266 break;
1267 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1268 len = sprintf(buf, "%s\n",
1269 (ha->ip_config.tcp_options &
1270 TCPOPT_DHCP_ENABLE) ?
1271 "dhcp" : "static");
1272 break;
1273 case ISCSI_NET_PARAM_IPV6_ADDR:
1274 if (iface->iface_num == 0)
1275 len = sprintf(buf, "%pI6\n",
1276 &ha->ip_config.ipv6_addr0);
1277 if (iface->iface_num == 1)
1278 len = sprintf(buf, "%pI6\n",
1279 &ha->ip_config.ipv6_addr1);
1280 break;
1281 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1282 len = sprintf(buf, "%pI6\n",
1283 &ha->ip_config.ipv6_link_local_addr);
1284 break;
1285 case ISCSI_NET_PARAM_IPV6_ROUTER:
1286 len = sprintf(buf, "%pI6\n",
1287 &ha->ip_config.ipv6_default_router_addr);
1288 break;
1289 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1290 pval = (ha->ip_config.ipv6_addl_options &
1291 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
1292 "nd" : "static";
1294 len = sprintf(buf, "%s\n", pval);
1295 break;
1296 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1297 pval = (ha->ip_config.ipv6_addl_options &
1298 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
1299 "auto" : "static";
1301 len = sprintf(buf, "%s\n", pval);
1302 break;
1303 case ISCSI_NET_PARAM_VLAN_ID:
1304 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1305 ival = ha->ip_config.ipv4_vlan_tag &
1306 ISCSI_MAX_VLAN_ID;
1307 else
1308 ival = ha->ip_config.ipv6_vlan_tag &
1309 ISCSI_MAX_VLAN_ID;
1311 len = sprintf(buf, "%d\n", ival);
1312 break;
1313 case ISCSI_NET_PARAM_VLAN_PRIORITY:
1314 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1315 ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
1316 ISCSI_MAX_VLAN_PRIORITY;
1317 else
1318 ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
1319 ISCSI_MAX_VLAN_PRIORITY;
1321 len = sprintf(buf, "%d\n", ival);
1322 break;
1323 case ISCSI_NET_PARAM_VLAN_ENABLED:
1324 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1325 OP_STATE(ha->ip_config.ipv4_options,
1326 IPOPT_VLAN_TAGGING_ENABLE, pval);
1327 } else {
1328 OP_STATE(ha->ip_config.ipv6_options,
1329 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
1331 len = sprintf(buf, "%s\n", pval);
1332 break;
1333 case ISCSI_NET_PARAM_MTU:
1334 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
1335 break;
1336 case ISCSI_NET_PARAM_PORT:
1337 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1338 len = sprintf(buf, "%d\n",
1339 ha->ip_config.ipv4_port);
1340 else
1341 len = sprintf(buf, "%d\n",
1342 ha->ip_config.ipv6_port);
1343 break;
1344 case ISCSI_NET_PARAM_IPADDR_STATE:
1345 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1346 pval = iscsi_get_ipaddress_state_name(
1347 ha->ip_config.ipv4_addr_state);
1348 } else {
1349 if (iface->iface_num == 0)
1350 pval = iscsi_get_ipaddress_state_name(
1351 ha->ip_config.ipv6_addr0_state);
1352 else if (iface->iface_num == 1)
1353 pval = iscsi_get_ipaddress_state_name(
1354 ha->ip_config.ipv6_addr1_state);
1357 len = sprintf(buf, "%s\n", pval);
1358 break;
1359 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
1360 pval = iscsi_get_ipaddress_state_name(
1361 ha->ip_config.ipv6_link_local_state);
1362 len = sprintf(buf, "%s\n", pval);
1363 break;
1364 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
1365 pval = iscsi_get_router_state_name(
1366 ha->ip_config.ipv6_default_router_state);
1367 len = sprintf(buf, "%s\n", pval);
1368 break;
1369 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
1370 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1371 OP_STATE(~ha->ip_config.tcp_options,
1372 TCPOPT_DELAYED_ACK_DISABLE, pval);
1373 } else {
1374 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1375 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
1377 len = sprintf(buf, "%s\n", pval);
1378 break;
1379 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
1380 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1381 OP_STATE(~ha->ip_config.tcp_options,
1382 TCPOPT_NAGLE_ALGO_DISABLE, pval);
1383 } else {
1384 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1385 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
1387 len = sprintf(buf, "%s\n", pval);
1388 break;
1389 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
1390 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1391 OP_STATE(~ha->ip_config.tcp_options,
1392 TCPOPT_WINDOW_SCALE_DISABLE, pval);
1393 } else {
1394 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1395 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
1396 pval);
1398 len = sprintf(buf, "%s\n", pval);
1399 break;
1400 case ISCSI_NET_PARAM_TCP_WSF:
1401 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1402 len = sprintf(buf, "%d\n",
1403 ha->ip_config.tcp_wsf);
1404 else
1405 len = sprintf(buf, "%d\n",
1406 ha->ip_config.ipv6_tcp_wsf);
1407 break;
1408 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
1409 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1410 ival = (ha->ip_config.tcp_options &
1411 TCPOPT_TIMER_SCALE) >> 1;
1412 else
1413 ival = (ha->ip_config.ipv6_tcp_options &
1414 IPV6_TCPOPT_TIMER_SCALE) >> 1;
1416 len = sprintf(buf, "%d\n", ival);
1417 break;
1418 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
1419 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1420 OP_STATE(ha->ip_config.tcp_options,
1421 TCPOPT_TIMESTAMP_ENABLE, pval);
1422 } else {
1423 OP_STATE(ha->ip_config.ipv6_tcp_options,
1424 IPV6_TCPOPT_TIMESTAMP_EN, pval);
1426 len = sprintf(buf, "%s\n", pval);
1427 break;
1428 case ISCSI_NET_PARAM_CACHE_ID:
1429 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1430 len = sprintf(buf, "%d\n",
1431 ha->ip_config.ipv4_cache_id);
1432 else
1433 len = sprintf(buf, "%d\n",
1434 ha->ip_config.ipv6_cache_id);
1435 break;
1436 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
1437 OP_STATE(ha->ip_config.tcp_options,
1438 TCPOPT_DNS_SERVER_IP_EN, pval);
1440 len = sprintf(buf, "%s\n", pval);
1441 break;
1442 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
1443 OP_STATE(ha->ip_config.tcp_options,
1444 TCPOPT_SLP_DA_INFO_EN, pval);
1446 len = sprintf(buf, "%s\n", pval);
1447 break;
1448 case ISCSI_NET_PARAM_IPV4_TOS_EN:
1449 OP_STATE(ha->ip_config.ipv4_options,
1450 IPOPT_IPV4_TOS_EN, pval);
1452 len = sprintf(buf, "%s\n", pval);
1453 break;
1454 case ISCSI_NET_PARAM_IPV4_TOS:
1455 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
1456 break;
1457 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
1458 OP_STATE(ha->ip_config.ipv4_options,
1459 IPOPT_GRAT_ARP_EN, pval);
1461 len = sprintf(buf, "%s\n", pval);
1462 break;
1463 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
1464 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
1465 pval);
1467 len = sprintf(buf, "%s\n", pval);
1468 break;
1469 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
1470 pval = (ha->ip_config.ipv4_alt_cid_len) ?
1471 (char *)ha->ip_config.ipv4_alt_cid : "";
1473 len = sprintf(buf, "%s\n", pval);
1474 break;
1475 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
1476 OP_STATE(ha->ip_config.ipv4_options,
1477 IPOPT_REQ_VID_EN, pval);
1479 len = sprintf(buf, "%s\n", pval);
1480 break;
1481 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
1482 OP_STATE(ha->ip_config.ipv4_options,
1483 IPOPT_USE_VID_EN, pval);
1485 len = sprintf(buf, "%s\n", pval);
1486 break;
1487 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
1488 pval = (ha->ip_config.ipv4_vid_len) ?
1489 (char *)ha->ip_config.ipv4_vid : "";
1491 len = sprintf(buf, "%s\n", pval);
1492 break;
1493 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
1494 OP_STATE(ha->ip_config.ipv4_options,
1495 IPOPT_LEARN_IQN_EN, pval);
1497 len = sprintf(buf, "%s\n", pval);
1498 break;
1499 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
1500 OP_STATE(~ha->ip_config.ipv4_options,
1501 IPOPT_FRAGMENTATION_DISABLE, pval);
1503 len = sprintf(buf, "%s\n", pval);
1504 break;
1505 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
1506 OP_STATE(ha->ip_config.ipv4_options,
1507 IPOPT_IN_FORWARD_EN, pval);
1509 len = sprintf(buf, "%s\n", pval);
1510 break;
1511 case ISCSI_NET_PARAM_REDIRECT_EN:
1512 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1513 OP_STATE(ha->ip_config.ipv4_options,
1514 IPOPT_ARP_REDIRECT_EN, pval);
1515 } else {
1516 OP_STATE(ha->ip_config.ipv6_options,
1517 IPV6_OPT_REDIRECT_EN, pval);
1519 len = sprintf(buf, "%s\n", pval);
1520 break;
1521 case ISCSI_NET_PARAM_IPV4_TTL:
1522 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
1523 break;
1524 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
1525 OP_STATE(ha->ip_config.ipv6_options,
1526 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
1528 len = sprintf(buf, "%s\n", pval);
1529 break;
1530 case ISCSI_NET_PARAM_IPV6_MLD_EN:
1531 OP_STATE(ha->ip_config.ipv6_addl_options,
1532 IPV6_ADDOPT_MLD_EN, pval);
1534 len = sprintf(buf, "%s\n", pval);
1535 break;
1536 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
1537 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
1538 break;
1539 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
1540 len = sprintf(buf, "%d\n",
1541 ha->ip_config.ipv6_traffic_class);
1542 break;
1543 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
1544 len = sprintf(buf, "%d\n",
1545 ha->ip_config.ipv6_hop_limit);
1546 break;
1547 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
1548 len = sprintf(buf, "%d\n",
1549 ha->ip_config.ipv6_nd_reach_time);
1550 break;
1551 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
1552 len = sprintf(buf, "%d\n",
1553 ha->ip_config.ipv6_nd_rexmit_timer);
1554 break;
1555 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
1556 len = sprintf(buf, "%d\n",
1557 ha->ip_config.ipv6_nd_stale_timeout);
1558 break;
1559 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
1560 len = sprintf(buf, "%d\n",
1561 ha->ip_config.ipv6_dup_addr_detect_count);
1562 break;
1563 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
1564 len = sprintf(buf, "%d\n",
1565 ha->ip_config.ipv6_gw_advrt_mtu);
1566 break;
1567 default:
1568 len = -ENOSYS;
1570 } else if (param_type == ISCSI_IFACE_PARAM) {
1571 switch (param) {
1572 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
1573 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
1574 break;
1575 case ISCSI_IFACE_PARAM_HDRDGST_EN:
1576 OP_STATE(ha->ip_config.iscsi_options,
1577 ISCSIOPTS_HEADER_DIGEST_EN, pval);
1579 len = sprintf(buf, "%s\n", pval);
1580 break;
1581 case ISCSI_IFACE_PARAM_DATADGST_EN:
1582 OP_STATE(ha->ip_config.iscsi_options,
1583 ISCSIOPTS_DATA_DIGEST_EN, pval);
1585 len = sprintf(buf, "%s\n", pval);
1586 break;
1587 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
1588 OP_STATE(ha->ip_config.iscsi_options,
1589 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
1591 len = sprintf(buf, "%s\n", pval);
1592 break;
1593 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
1594 OP_STATE(ha->ip_config.iscsi_options,
1595 ISCSIOPTS_INITIAL_R2T_EN, pval);
1597 len = sprintf(buf, "%s\n", pval);
1598 break;
1599 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
1600 OP_STATE(ha->ip_config.iscsi_options,
1601 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
1603 len = sprintf(buf, "%s\n", pval);
1604 break;
1605 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
1606 OP_STATE(ha->ip_config.iscsi_options,
1607 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
1609 len = sprintf(buf, "%s\n", pval);
1610 break;
1611 case ISCSI_IFACE_PARAM_ERL:
1612 len = sprintf(buf, "%d\n",
1613 (ha->ip_config.iscsi_options &
1614 ISCSIOPTS_ERL));
1615 break;
1616 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
1617 len = sprintf(buf, "%u\n",
1618 ha->ip_config.iscsi_max_pdu_size *
1619 BYTE_UNITS);
1620 break;
1621 case ISCSI_IFACE_PARAM_FIRST_BURST:
1622 len = sprintf(buf, "%u\n",
1623 ha->ip_config.iscsi_first_burst_len *
1624 BYTE_UNITS);
1625 break;
1626 case ISCSI_IFACE_PARAM_MAX_R2T:
1627 len = sprintf(buf, "%d\n",
1628 ha->ip_config.iscsi_max_outstnd_r2t);
1629 break;
1630 case ISCSI_IFACE_PARAM_MAX_BURST:
1631 len = sprintf(buf, "%u\n",
1632 ha->ip_config.iscsi_max_burst_len *
1633 BYTE_UNITS);
1634 break;
1635 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
1636 OP_STATE(ha->ip_config.iscsi_options,
1637 ISCSIOPTS_CHAP_AUTH_EN, pval);
1639 len = sprintf(buf, "%s\n", pval);
1640 break;
1641 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
1642 OP_STATE(ha->ip_config.iscsi_options,
1643 ISCSIOPTS_BIDI_CHAP_EN, pval);
1645 len = sprintf(buf, "%s\n", pval);
1646 break;
1647 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
1648 OP_STATE(ha->ip_config.iscsi_options,
1649 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
1651 len = sprintf(buf, "%s\n", pval);
1652 break;
1653 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
1654 OP_STATE(ha->ip_config.iscsi_options,
1655 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
1657 len = sprintf(buf, "%s\n", pval);
1658 break;
1659 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
1660 OP_STATE(ha->ip_config.iscsi_options,
1661 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
1663 len = sprintf(buf, "%s\n", pval);
1664 break;
1665 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
1666 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
1667 break;
1668 default:
1669 len = -ENOSYS;
1673 return len;
1676 static struct iscsi_endpoint *
1677 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1678 int non_blocking)
1680 int ret;
1681 struct iscsi_endpoint *ep;
1682 struct qla_endpoint *qla_ep;
1683 struct scsi_qla_host *ha;
1684 struct sockaddr_in *addr;
1685 struct sockaddr_in6 *addr6;
1687 if (!shost) {
1688 ret = -ENXIO;
1689 pr_err("%s: shost is NULL\n", __func__);
1690 return ERR_PTR(ret);
1693 ha = iscsi_host_priv(shost);
1694 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1695 if (!ep) {
1696 ret = -ENOMEM;
1697 return ERR_PTR(ret);
1700 qla_ep = ep->dd_data;
1701 memset(qla_ep, 0, sizeof(struct qla_endpoint));
1702 if (dst_addr->sa_family == AF_INET) {
1703 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1704 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1705 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1706 (char *)&addr->sin_addr));
1707 } else if (dst_addr->sa_family == AF_INET6) {
1708 memcpy(&qla_ep->dst_addr, dst_addr,
1709 sizeof(struct sockaddr_in6));
1710 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1711 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1712 (char *)&addr6->sin6_addr));
1713 } else {
1714 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
1715 __func__);
1718 qla_ep->host = shost;
1720 return ep;
1723 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1725 struct qla_endpoint *qla_ep;
1726 struct scsi_qla_host *ha;
1727 int ret = 0;
1729 qla_ep = ep->dd_data;
1730 ha = to_qla_host(qla_ep->host);
1731 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
1733 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1734 ret = 1;
1736 return ret;
1739 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
1741 struct qla_endpoint *qla_ep;
1742 struct scsi_qla_host *ha;
1744 qla_ep = ep->dd_data;
1745 ha = to_qla_host(qla_ep->host);
1746 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1747 ha->host_no));
1748 iscsi_destroy_endpoint(ep);
1751 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1752 enum iscsi_param param,
1753 char *buf)
1755 struct qla_endpoint *qla_ep = ep->dd_data;
1756 struct sockaddr *dst_addr;
1757 struct scsi_qla_host *ha;
1759 if (!qla_ep)
1760 return -ENOTCONN;
1762 ha = to_qla_host(qla_ep->host);
1763 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1764 ha->host_no));
1766 switch (param) {
1767 case ISCSI_PARAM_CONN_PORT:
1768 case ISCSI_PARAM_CONN_ADDRESS:
1769 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1770 if (!dst_addr)
1771 return -ENOTCONN;
1773 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1774 &qla_ep->dst_addr, param, buf);
1775 default:
1776 return -ENOSYS;
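/*
 * qla4xxx_conn_get_stats - fetch per-connection iSCSI statistics
 *
 * Allocates a DMA-coherent buffer, retrieves the firmware statistics for
 * the session's DDB index via qla4xxx_get_mgmt_data(), and converts the
 * little-endian counters into the iscsi_stats structure.
 */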
1780 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1781 struct iscsi_stats *stats)
1783 struct iscsi_session *sess;
1784 struct iscsi_cls_session *cls_sess;
1785 struct ddb_entry *ddb_entry;
1786 struct scsi_qla_host *ha;
1787 struct ql_iscsi_stats *ql_iscsi_stats;
1788 int stats_size;
1789 int ret;
1790 dma_addr_t iscsi_stats_dma;
1792 cls_sess = iscsi_conn_to_session(cls_conn);
1793 sess = cls_sess->dd_data;
1794 ddb_entry = sess->dd_data;
1795 ha = ddb_entry->ha;
1797 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1798 ha->host_no));
1799 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1800 /* Allocate memory */
1801 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1802 &iscsi_stats_dma, GFP_KERNEL);
1803 if (!ql_iscsi_stats) {
1804 ql4_printk(KERN_ERR, ha,
1805 "Unable to allocate memory for iscsi stats\n");
1806 goto exit_get_stats;
1809 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
1810 iscsi_stats_dma);
1811 if (ret != QLA_SUCCESS) {
1812 ql4_printk(KERN_ERR, ha,
1813 "Unable to retrieve iscsi stats\n");
1814 goto free_stats;
1817 /* octets */
1818 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
1819 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
1820 /* xmit pdus */
1821 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
1822 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
1823 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
1824 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
1825 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
1826 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
1827 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
1828 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
1829 /* recv pdus */
1830 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
1831 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
1832 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
1833 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
1834 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
1835 stats->logoutrsp_pdus =
1836 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
1837 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
1838 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
1839 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
1841 free_stats:
1842 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
1843 iscsi_stats_dma);
1844 exit_get_stats:
1845 return;
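/*
 * qla4xxx_eh_cmd_timed_out - block-layer command timeout hook
 *
 * While the session is in ISCSI_SESSION_FAILED state, ask the block layer
 * to reset the command timer instead of escalating to SCSI error handling,
 * giving session recovery a chance to complete.
 */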
1848 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1850 struct iscsi_cls_session *session;
1851 struct iscsi_session *sess;
1852 unsigned long flags;
1853 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
1855 session = starget_to_session(scsi_target(sc->device));
1856 sess = session->dd_data;
1858 spin_lock_irqsave(&session->lock, flags);
1859 if (session->state == ISCSI_SESSION_FAILED)
1860 ret = BLK_EH_RESET_TIMER;
1861 spin_unlock_irqrestore(&session->lock, flags);
1863 return ret;
1866 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1868 struct scsi_qla_host *ha = to_qla_host(shost);
1869 struct iscsi_cls_host *ihost = shost->shost_data;
1870 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1872 qla4xxx_get_firmware_state(ha);
1874 switch (ha->addl_fw_state & 0x0F00) {
1875 case FW_ADDSTATE_LINK_SPEED_10MBPS:
1876 speed = ISCSI_PORT_SPEED_10MBPS;
1877 break;
1878 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1879 speed = ISCSI_PORT_SPEED_100MBPS;
1880 break;
1881 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1882 speed = ISCSI_PORT_SPEED_1GBPS;
1883 break;
1884 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1885 speed = ISCSI_PORT_SPEED_10GBPS;
1886 break;
1888 ihost->port_speed = speed;
1891 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1893 struct scsi_qla_host *ha = to_qla_host(shost);
1894 struct iscsi_cls_host *ihost = shost->shost_data;
1895 uint32_t state = ISCSI_PORT_STATE_DOWN;
1897 if (test_bit(AF_LINK_UP, &ha->flags))
1898 state = ISCSI_PORT_STATE_UP;
1900 ihost->port_state = state;
1903 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1904 enum iscsi_host_param param, char *buf)
1906 struct scsi_qla_host *ha = to_qla_host(shost);
1907 int len;
1909 switch (param) {
1910 case ISCSI_HOST_PARAM_HWADDRESS:
1911 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1912 break;
1913 case ISCSI_HOST_PARAM_IPADDRESS:
1914 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1915 break;
1916 case ISCSI_HOST_PARAM_INITIATOR_NAME:
1917 len = sprintf(buf, "%s\n", ha->name_string);
1918 break;
1919 case ISCSI_HOST_PARAM_PORT_STATE:
1920 qla4xxx_set_port_state(shost);
1921 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1922 break;
1923 case ISCSI_HOST_PARAM_PORT_SPEED:
1924 qla4xxx_set_port_speed(shost);
1925 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1926 break;
1927 default:
1928 return -ENOSYS;
1931 return len;
1934 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1936 if (ha->iface_ipv4)
1937 return;
1939 /* IPv4 */
1940 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1941 &qla4xxx_iscsi_transport,
1942 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1943 if (!ha->iface_ipv4)
1944 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1945 "iface0.\n");
1948 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1950 if (!ha->iface_ipv6_0)
1951 /* IPv6 iface-0 */
1952 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1953 &qla4xxx_iscsi_transport,
1954 ISCSI_IFACE_TYPE_IPV6, 0,
1955 0);
1956 if (!ha->iface_ipv6_0)
1957 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1958 "iface0.\n");
1960 if (!ha->iface_ipv6_1)
1961 /* IPv6 iface-1 */
1962 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1963 &qla4xxx_iscsi_transport,
1964 ISCSI_IFACE_TYPE_IPV6, 1,
1965 0);
1966 if (!ha->iface_ipv6_1)
1967 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1968 "iface1.\n");
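/*
 * qla4xxx_create_ifaces - create iscsi iface entries for this adapter
 *
 * Creates the IPv4 iface and/or the two IPv6 ifaces, depending on which
 * protocols are enabled in the firmware IP configuration.
 */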
1971 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1973 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1974 qla4xxx_create_ipv4_iface(ha);
1976 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1977 qla4xxx_create_ipv6_iface(ha);
1980 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1982 if (ha->iface_ipv4) {
1983 iscsi_destroy_iface(ha->iface_ipv4);
1984 ha->iface_ipv4 = NULL;
1988 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1990 if (ha->iface_ipv6_0) {
1991 iscsi_destroy_iface(ha->iface_ipv6_0);
1992 ha->iface_ipv6_0 = NULL;
1994 if (ha->iface_ipv6_1) {
1995 iscsi_destroy_iface(ha->iface_ipv6_1);
1996 ha->iface_ipv6_1 = NULL;
2000 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
2002 qla4xxx_destroy_ipv4_iface(ha);
2003 qla4xxx_destroy_ipv6_iface(ha);
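/*
 * qla4xxx_set_ipv6 - apply one IPv6 network parameter to the init fw
 * control block
 *
 * Translates an ISCSI_NET_PARAM value received over netlink into the
 * corresponding field or option bit of the IPv6 portion of the address
 * control block (init_fw_cb).
 */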
2006 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
2007 struct iscsi_iface_param_info *iface_param,
2008 struct addr_ctrl_blk *init_fw_cb)
2010 /*
2011 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
2012 * iface_num 1 is valid only for IPv6 Addr.
2013 */
2014 switch (iface_param->param) {
2015 case ISCSI_NET_PARAM_IPV6_ADDR:
2016 if (iface_param->iface_num & 0x1)
2017 /* IPv6 Addr 1 */
2018 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
2019 sizeof(init_fw_cb->ipv6_addr1));
2020 else
2021 /* IPv6 Addr 0 */
2022 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
2023 sizeof(init_fw_cb->ipv6_addr0));
2024 break;
2025 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
2026 if (iface_param->iface_num & 0x1)
2027 break;
2028 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
2029 sizeof(init_fw_cb->ipv6_if_id));
2030 break;
2031 case ISCSI_NET_PARAM_IPV6_ROUTER:
2032 if (iface_param->iface_num & 0x1)
2033 break;
2034 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
2035 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2036 break;
2037 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
2038 /* Autocfg applies to even interface */
2039 if (iface_param->iface_num & 0x1)
2040 break;
2042 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
2043 init_fw_cb->ipv6_addtl_opts &=
2044 cpu_to_le16(
2045 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2046 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
2047 init_fw_cb->ipv6_addtl_opts |=
2048 cpu_to_le16(
2049 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2050 else
2051 ql4_printk(KERN_ERR, ha,
2052 "Invalid autocfg setting for IPv6 addr\n");
2053 break;
2054 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
2055 /* Autocfg applies to even interface */
2056 if (iface_param->iface_num & 0x1)
2057 break;
2059 if (iface_param->value[0] ==
2060 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
2061 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
2062 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2063 else if (iface_param->value[0] ==
2064 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
2065 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
2066 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2067 else
2068 ql4_printk(KERN_ERR, ha,
2069 "Invalid autocfg setting for IPv6 linklocal addr\n");
2070 break;
2071 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
2072 /* Autocfg applies to even interface */
2073 if (iface_param->iface_num & 0x1)
2074 break;
2076 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
2077 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
2078 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2079 break;
2080 case ISCSI_NET_PARAM_IFACE_ENABLE:
2081 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2082 init_fw_cb->ipv6_opts |=
2083 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
2084 qla4xxx_create_ipv6_iface(ha);
2085 } else {
2086 init_fw_cb->ipv6_opts &=
2087 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
2088 0xFFFF);
2089 qla4xxx_destroy_ipv6_iface(ha);
2091 break;
2092 case ISCSI_NET_PARAM_VLAN_TAG:
2093 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
2094 break;
2095 init_fw_cb->ipv6_vlan_tag =
2096 cpu_to_be16(*(uint16_t *)iface_param->value);
2097 break;
2098 case ISCSI_NET_PARAM_VLAN_ENABLED:
2099 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2100 init_fw_cb->ipv6_opts |=
2101 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
2102 else
2103 init_fw_cb->ipv6_opts &=
2104 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
2105 break;
2106 case ISCSI_NET_PARAM_MTU:
2107 init_fw_cb->eth_mtu_size =
2108 cpu_to_le16(*(uint16_t *)iface_param->value);
2109 break;
2110 case ISCSI_NET_PARAM_PORT:
2111 /* Autocfg applies to even interface */
2112 if (iface_param->iface_num & 0x1)
2113 break;
2115 init_fw_cb->ipv6_port =
2116 cpu_to_le16(*(uint16_t *)iface_param->value);
2117 break;
2118 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2119 if (iface_param->iface_num & 0x1)
2120 break;
2121 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2122 init_fw_cb->ipv6_tcp_opts |=
2123 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
2124 else
2125 init_fw_cb->ipv6_tcp_opts &=
2126 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
2127 0xFFFF);
2128 break;
2129 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2130 if (iface_param->iface_num & 0x1)
2131 break;
2132 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2133 init_fw_cb->ipv6_tcp_opts |=
2134 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2135 else
2136 init_fw_cb->ipv6_tcp_opts &=
2137 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2138 break;
2139 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2140 if (iface_param->iface_num & 0x1)
2141 break;
2142 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2143 init_fw_cb->ipv6_tcp_opts |=
2144 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2145 else
2146 init_fw_cb->ipv6_tcp_opts &=
2147 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2148 break;
2149 case ISCSI_NET_PARAM_TCP_WSF:
2150 if (iface_param->iface_num & 0x1)
2151 break;
2152 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
2153 break;
2154 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2155 if (iface_param->iface_num & 0x1)
2156 break;
2157 init_fw_cb->ipv6_tcp_opts &=
2158 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
2159 init_fw_cb->ipv6_tcp_opts |=
2160 cpu_to_le16((iface_param->value[0] << 1) &
2161 IPV6_TCPOPT_TIMER_SCALE);
2162 break;
2163 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2164 if (iface_param->iface_num & 0x1)
2165 break;
2166 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2167 init_fw_cb->ipv6_tcp_opts |=
2168 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
2169 else
2170 init_fw_cb->ipv6_tcp_opts &=
2171 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
2172 break;
2173 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
2174 if (iface_param->iface_num & 0x1)
2175 break;
2176 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2177 init_fw_cb->ipv6_opts |=
2178 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2179 else
2180 init_fw_cb->ipv6_opts &=
2181 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2182 break;
2183 case ISCSI_NET_PARAM_REDIRECT_EN:
2184 if (iface_param->iface_num & 0x1)
2185 break;
2186 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2187 init_fw_cb->ipv6_opts |=
2188 cpu_to_le16(IPV6_OPT_REDIRECT_EN);
2189 else
2190 init_fw_cb->ipv6_opts &=
2191 cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
2192 break;
2193 case ISCSI_NET_PARAM_IPV6_MLD_EN:
2194 if (iface_param->iface_num & 0x1)
2195 break;
2196 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2197 init_fw_cb->ipv6_addtl_opts |=
2198 cpu_to_le16(IPV6_ADDOPT_MLD_EN);
2199 else
2200 init_fw_cb->ipv6_addtl_opts &=
2201 cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
2202 break;
2203 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
2204 if (iface_param->iface_num & 0x1)
2205 break;
2206 init_fw_cb->ipv6_flow_lbl =
2207 cpu_to_le16(*(uint16_t *)iface_param->value);
2208 break;
2209 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
2210 if (iface_param->iface_num & 0x1)
2211 break;
2212 init_fw_cb->ipv6_traffic_class = iface_param->value[0];
2213 break;
2214 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
2215 if (iface_param->iface_num & 0x1)
2216 break;
2217 init_fw_cb->ipv6_hop_limit = iface_param->value[0];
2218 break;
2219 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
2220 if (iface_param->iface_num & 0x1)
2221 break;
2222 init_fw_cb->ipv6_nd_reach_time =
2223 cpu_to_le32(*(uint32_t *)iface_param->value);
2224 break;
2225 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
2226 if (iface_param->iface_num & 0x1)
2227 break;
2228 init_fw_cb->ipv6_nd_rexmit_timer =
2229 cpu_to_le32(*(uint32_t *)iface_param->value);
2230 break;
2231 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
2232 if (iface_param->iface_num & 0x1)
2233 break;
2234 init_fw_cb->ipv6_nd_stale_timeout =
2235 cpu_to_le32(*(uint32_t *)iface_param->value);
2236 break;
2237 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
2238 if (iface_param->iface_num & 0x1)
2239 break;
2240 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
2241 break;
2242 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
2243 if (iface_param->iface_num & 0x1)
2244 break;
2245 init_fw_cb->ipv6_gw_advrt_mtu =
2246 cpu_to_le32(*(uint32_t *)iface_param->value);
2247 break;
2248 default:
2249 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
2250 iface_param->param);
2251 break;
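/*
 * qla4xxx_set_ipv4 - apply one IPv4 network parameter to the init fw
 * control block; IPv4 counterpart of qla4xxx_set_ipv6() above.
 */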
2255 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
2256 struct iscsi_iface_param_info *iface_param,
2257 struct addr_ctrl_blk *init_fw_cb)
2259 switch (iface_param->param) {
2260 case ISCSI_NET_PARAM_IPV4_ADDR:
2261 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
2262 sizeof(init_fw_cb->ipv4_addr));
2263 break;
2264 case ISCSI_NET_PARAM_IPV4_SUBNET:
2265 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
2266 sizeof(init_fw_cb->ipv4_subnet));
2267 break;
2268 case ISCSI_NET_PARAM_IPV4_GW:
2269 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
2270 sizeof(init_fw_cb->ipv4_gw_addr));
2271 break;
2272 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
2273 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
2274 init_fw_cb->ipv4_tcp_opts |=
2275 cpu_to_le16(TCPOPT_DHCP_ENABLE);
2276 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
2277 init_fw_cb->ipv4_tcp_opts &=
2278 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
2279 else
2280 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
2281 break;
2282 case ISCSI_NET_PARAM_IFACE_ENABLE:
2283 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2284 init_fw_cb->ipv4_ip_opts |=
2285 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
2286 qla4xxx_create_ipv4_iface(ha);
2287 } else {
2288 init_fw_cb->ipv4_ip_opts &=
2289 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
2290 0xFFFF);
2291 qla4xxx_destroy_ipv4_iface(ha);
2293 break;
2294 case ISCSI_NET_PARAM_VLAN_TAG:
2295 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
2296 break;
2297 init_fw_cb->ipv4_vlan_tag =
2298 cpu_to_be16(*(uint16_t *)iface_param->value);
2299 break;
2300 case ISCSI_NET_PARAM_VLAN_ENABLED:
2301 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2302 init_fw_cb->ipv4_ip_opts |=
2303 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
2304 else
2305 init_fw_cb->ipv4_ip_opts &=
2306 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
2307 break;
2308 case ISCSI_NET_PARAM_MTU:
2309 init_fw_cb->eth_mtu_size =
2310 cpu_to_le16(*(uint16_t *)iface_param->value);
2311 break;
2312 case ISCSI_NET_PARAM_PORT:
2313 init_fw_cb->ipv4_port =
2314 cpu_to_le16(*(uint16_t *)iface_param->value);
2315 break;
2316 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2317 if (iface_param->iface_num & 0x1)
2318 break;
2319 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2320 init_fw_cb->ipv4_tcp_opts |=
2321 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
2322 else
2323 init_fw_cb->ipv4_tcp_opts &=
2324 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
2325 0xFFFF);
2326 break;
2327 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2328 if (iface_param->iface_num & 0x1)
2329 break;
2330 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2331 init_fw_cb->ipv4_tcp_opts |=
2332 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
2333 else
2334 init_fw_cb->ipv4_tcp_opts &=
2335 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
2336 break;
2337 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2338 if (iface_param->iface_num & 0x1)
2339 break;
2340 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2341 init_fw_cb->ipv4_tcp_opts |=
2342 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
2343 else
2344 init_fw_cb->ipv4_tcp_opts &=
2345 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
2346 break;
2347 case ISCSI_NET_PARAM_TCP_WSF:
2348 if (iface_param->iface_num & 0x1)
2349 break;
2350 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
2351 break;
2352 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2353 if (iface_param->iface_num & 0x1)
2354 break;
2355 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
2356 init_fw_cb->ipv4_tcp_opts |=
2357 cpu_to_le16((iface_param->value[0] << 1) &
2358 TCPOPT_TIMER_SCALE);
2359 break;
2360 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2361 if (iface_param->iface_num & 0x1)
2362 break;
2363 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2364 init_fw_cb->ipv4_tcp_opts |=
2365 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
2366 else
2367 init_fw_cb->ipv4_tcp_opts &=
2368 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
2369 break;
2370 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
2371 if (iface_param->iface_num & 0x1)
2372 break;
2373 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2374 init_fw_cb->ipv4_tcp_opts |=
2375 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
2376 else
2377 init_fw_cb->ipv4_tcp_opts &=
2378 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
2379 break;
2380 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
2381 if (iface_param->iface_num & 0x1)
2382 break;
2383 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2384 init_fw_cb->ipv4_tcp_opts |=
2385 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
2386 else
2387 init_fw_cb->ipv4_tcp_opts &=
2388 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
2389 break;
2390 case ISCSI_NET_PARAM_IPV4_TOS_EN:
2391 if (iface_param->iface_num & 0x1)
2392 break;
2393 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2394 init_fw_cb->ipv4_ip_opts |=
2395 cpu_to_le16(IPOPT_IPV4_TOS_EN);
2396 else
2397 init_fw_cb->ipv4_ip_opts &=
2398 cpu_to_le16(~IPOPT_IPV4_TOS_EN);
2399 break;
2400 case ISCSI_NET_PARAM_IPV4_TOS:
2401 if (iface_param->iface_num & 0x1)
2402 break;
2403 init_fw_cb->ipv4_tos = iface_param->value[0];
2404 break;
2405 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
2406 if (iface_param->iface_num & 0x1)
2407 break;
2408 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2409 init_fw_cb->ipv4_ip_opts |=
2410 cpu_to_le16(IPOPT_GRAT_ARP_EN);
2411 else
2412 init_fw_cb->ipv4_ip_opts &=
2413 cpu_to_le16(~IPOPT_GRAT_ARP_EN);
2414 break;
2415 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
2416 if (iface_param->iface_num & 0x1)
2417 break;
2418 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2419 init_fw_cb->ipv4_ip_opts |=
2420 cpu_to_le16(IPOPT_ALT_CID_EN);
2421 else
2422 init_fw_cb->ipv4_ip_opts &=
2423 cpu_to_le16(~IPOPT_ALT_CID_EN);
2424 break;
2425 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
2426 if (iface_param->iface_num & 0x1)
2427 break;
2428 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
2429 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
2430 init_fw_cb->ipv4_dhcp_alt_cid_len =
2431 strlen(init_fw_cb->ipv4_dhcp_alt_cid);
2432 break;
2433 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
2434 if (iface_param->iface_num & 0x1)
2435 break;
2436 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2437 init_fw_cb->ipv4_ip_opts |=
2438 cpu_to_le16(IPOPT_REQ_VID_EN);
2439 else
2440 init_fw_cb->ipv4_ip_opts &=
2441 cpu_to_le16(~IPOPT_REQ_VID_EN);
2442 break;
2443 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
2444 if (iface_param->iface_num & 0x1)
2445 break;
2446 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2447 init_fw_cb->ipv4_ip_opts |=
2448 cpu_to_le16(IPOPT_USE_VID_EN);
2449 else
2450 init_fw_cb->ipv4_ip_opts &=
2451 cpu_to_le16(~IPOPT_USE_VID_EN);
2452 break;
2453 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
2454 if (iface_param->iface_num & 0x1)
2455 break;
2456 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
2457 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
2458 init_fw_cb->ipv4_dhcp_vid_len =
2459 strlen(init_fw_cb->ipv4_dhcp_vid);
2460 break;
2461 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
2462 if (iface_param->iface_num & 0x1)
2463 break;
2464 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2465 init_fw_cb->ipv4_ip_opts |=
2466 cpu_to_le16(IPOPT_LEARN_IQN_EN);
2467 else
2468 init_fw_cb->ipv4_ip_opts &=
2469 cpu_to_le16(~IPOPT_LEARN_IQN_EN);
2470 break;
2471 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
2472 if (iface_param->iface_num & 0x1)
2473 break;
2474 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2475 init_fw_cb->ipv4_ip_opts |=
2476 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
2477 else
2478 init_fw_cb->ipv4_ip_opts &=
2479 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
2480 break;
2481 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
2482 if (iface_param->iface_num & 0x1)
2483 break;
2484 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2485 init_fw_cb->ipv4_ip_opts |=
2486 cpu_to_le16(IPOPT_IN_FORWARD_EN);
2487 else
2488 init_fw_cb->ipv4_ip_opts &=
2489 cpu_to_le16(~IPOPT_IN_FORWARD_EN);
2490 break;
2491 case ISCSI_NET_PARAM_REDIRECT_EN:
2492 if (iface_param->iface_num & 0x1)
2493 break;
2494 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2495 init_fw_cb->ipv4_ip_opts |=
2496 cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
2497 else
2498 init_fw_cb->ipv4_ip_opts &=
2499 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
2500 break;
2501 case ISCSI_NET_PARAM_IPV4_TTL:
2502 if (iface_param->iface_num & 0x1)
2503 break;
2504 init_fw_cb->ipv4_ttl = iface_param->value[0];
2505 break;
2506 default:
2507 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
2508 iface_param->param);
2509 break;
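/*
 * qla4xxx_set_iscsi_param - apply one iSCSI-level interface parameter
 *
 * Handles ISCSI_IFACE_PARAM settings (digests, R2T, burst lengths, CHAP,
 * ERL, discovery options) by updating iscsi_opts and the related fields
 * of the address control block.
 */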
2513 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
2514 struct iscsi_iface_param_info *iface_param,
2515 struct addr_ctrl_blk *init_fw_cb)
2517 switch (iface_param->param) {
2518 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
2519 if (iface_param->iface_num & 0x1)
2520 break;
2521 init_fw_cb->def_timeout =
2522 cpu_to_le16(*(uint16_t *)iface_param->value);
2523 break;
2524 case ISCSI_IFACE_PARAM_HDRDGST_EN:
2525 if (iface_param->iface_num & 0x1)
2526 break;
2527 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2528 init_fw_cb->iscsi_opts |=
2529 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
2530 else
2531 init_fw_cb->iscsi_opts &=
2532 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
2533 break;
2534 case ISCSI_IFACE_PARAM_DATADGST_EN:
2535 if (iface_param->iface_num & 0x1)
2536 break;
2537 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2538 init_fw_cb->iscsi_opts |=
2539 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
2540 else
2541 init_fw_cb->iscsi_opts &=
2542 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
2543 break;
2544 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
2545 if (iface_param->iface_num & 0x1)
2546 break;
2547 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2548 init_fw_cb->iscsi_opts |=
2549 cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
2550 else
2551 init_fw_cb->iscsi_opts &=
2552 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
2553 break;
2554 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
2555 if (iface_param->iface_num & 0x1)
2556 break;
2557 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2558 init_fw_cb->iscsi_opts |=
2559 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
2560 else
2561 init_fw_cb->iscsi_opts &=
2562 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
2563 break;
2564 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
2565 if (iface_param->iface_num & 0x1)
2566 break;
2567 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2568 init_fw_cb->iscsi_opts |=
2569 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
2570 else
2571 init_fw_cb->iscsi_opts &=
2572 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
2573 break;
2574 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
2575 if (iface_param->iface_num & 0x1)
2576 break;
2577 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2578 init_fw_cb->iscsi_opts |=
2579 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
2580 else
2581 init_fw_cb->iscsi_opts &=
2582 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
2583 break;
2584 case ISCSI_IFACE_PARAM_ERL:
2585 if (iface_param->iface_num & 0x1)
2586 break;
2587 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
2588 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
2589 ISCSIOPTS_ERL);
2590 break;
2591 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
2592 if (iface_param->iface_num & 0x1)
2593 break;
2594 init_fw_cb->iscsi_max_pdu_size =
2595 cpu_to_le32(*(uint32_t *)iface_param->value) /
2596 BYTE_UNITS;
2597 break;
2598 case ISCSI_IFACE_PARAM_FIRST_BURST:
2599 if (iface_param->iface_num & 0x1)
2600 break;
2601 init_fw_cb->iscsi_fburst_len =
2602 cpu_to_le32(*(uint32_t *)iface_param->value) /
2603 BYTE_UNITS;
2604 break;
2605 case ISCSI_IFACE_PARAM_MAX_R2T:
2606 if (iface_param->iface_num & 0x1)
2607 break;
2608 init_fw_cb->iscsi_max_outstnd_r2t =
2609 cpu_to_le16(*(uint16_t *)iface_param->value);
2610 break;
2611 case ISCSI_IFACE_PARAM_MAX_BURST:
2612 if (iface_param->iface_num & 0x1)
2613 break;
2614 init_fw_cb->iscsi_max_burst_len =
2615 cpu_to_le32(*(uint32_t *)iface_param->value) /
2616 BYTE_UNITS;
2617 break;
2618 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
2619 if (iface_param->iface_num & 0x1)
2620 break;
2621 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2622 init_fw_cb->iscsi_opts |=
2623 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
2624 else
2625 init_fw_cb->iscsi_opts &=
2626 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
2627 break;
2628 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
2629 if (iface_param->iface_num & 0x1)
2630 break;
2631 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2632 init_fw_cb->iscsi_opts |=
2633 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
2634 else
2635 init_fw_cb->iscsi_opts &=
2636 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
2637 break;
2638 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
2639 if (iface_param->iface_num & 0x1)
2640 break;
2641 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2642 init_fw_cb->iscsi_opts |=
2643 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
2644 else
2645 init_fw_cb->iscsi_opts &=
2646 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
2647 break;
2648 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
2649 if (iface_param->iface_num & 0x1)
2650 break;
2651 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2652 init_fw_cb->iscsi_opts |=
2653 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2654 else
2655 init_fw_cb->iscsi_opts &=
2656 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2657 break;
2658 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
2659 if (iface_param->iface_num & 0x1)
2660 break;
2661 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2662 init_fw_cb->iscsi_opts |=
2663 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2664 else
2665 init_fw_cb->iscsi_opts &=
2666 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2667 break;
2668 default:
2669 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
2670 iface_param->param);
2671 break;
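/*
 * qla4xxx_initcb_to_acb - scrub the reserved fields of the control block
 *
 * The same buffer is reused as an addr_ctrl_blk_def before it is handed
 * to the SET_ACB mailbox command, so all reserved regions are cleared.
 */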
2675 static void
2676 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
2678 struct addr_ctrl_blk_def *acb;
2679 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
2680 memset(acb->reserved1, 0, sizeof(acb->reserved1));
2681 memset(acb->reserved2, 0, sizeof(acb->reserved2));
2682 memset(acb->reserved3, 0, sizeof(acb->reserved3));
2683 memset(acb->reserved4, 0, sizeof(acb->reserved4));
2684 memset(acb->reserved5, 0, sizeof(acb->reserved5));
2685 memset(acb->reserved6, 0, sizeof(acb->reserved6));
2686 memset(acb->reserved7, 0, sizeof(acb->reserved7));
2687 memset(acb->reserved8, 0, sizeof(acb->reserved8));
2688 memset(acb->reserved9, 0, sizeof(acb->reserved9));
2689 memset(acb->reserved10, 0, sizeof(acb->reserved10));
2690 memset(acb->reserved11, 0, sizeof(acb->reserved11));
2691 memset(acb->reserved12, 0, sizeof(acb->reserved12));
2692 memset(acb->reserved13, 0, sizeof(acb->reserved13));
2693 memset(acb->reserved14, 0, sizeof(acb->reserved14));
2694 memset(acb->reserved15, 0, sizeof(acb->reserved15));
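/*
 * qla4xxx_iface_set_param - netlink entry point for iface configuration
 *
 * Reads the current IFCB from the firmware, applies every parameter
 * carried in the netlink attributes (IPv4, IPv6 and iSCSI params), writes
 * the updated block to flash, disables the current ACB and sets the new
 * one, then refreshes the driver's local copy of the IFCB.
 */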
2697 static int
2698 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2700 struct scsi_qla_host *ha = to_qla_host(shost);
2701 int rval = 0;
2702 struct iscsi_iface_param_info *iface_param = NULL;
2703 struct addr_ctrl_blk *init_fw_cb = NULL;
2704 dma_addr_t init_fw_cb_dma;
2705 uint32_t mbox_cmd[MBOX_REG_COUNT];
2706 uint32_t mbox_sts[MBOX_REG_COUNT];
2707 uint32_t rem = len;
2708 struct nlattr *attr;
2710 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2711 sizeof(struct addr_ctrl_blk),
2712 &init_fw_cb_dma, GFP_KERNEL);
2713 if (!init_fw_cb) {
2714 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2715 __func__);
2716 return -ENOMEM;
2719 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2720 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2721 memset(&mbox_sts, 0, sizeof(mbox_sts));
2723 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
2724 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
2725 rval = -EIO;
2726 goto exit_init_fw_cb;
2729 nla_for_each_attr(attr, data, len, rem) {
2730 iface_param = nla_data(attr);
2732 if (iface_param->param_type == ISCSI_NET_PARAM) {
2733 switch (iface_param->iface_type) {
2734 case ISCSI_IFACE_TYPE_IPV4:
2735 switch (iface_param->iface_num) {
2736 case 0:
2737 qla4xxx_set_ipv4(ha, iface_param,
2738 init_fw_cb);
2739 break;
2740 default:
2741 /* Cannot have more than one IPv4 interface */
2742 ql4_printk(KERN_ERR, ha,
2743 "Invalid IPv4 iface number = %d\n",
2744 iface_param->iface_num);
2745 break;
2747 break;
2748 case ISCSI_IFACE_TYPE_IPV6:
2749 switch (iface_param->iface_num) {
2750 case 0:
2751 case 1:
2752 qla4xxx_set_ipv6(ha, iface_param,
2753 init_fw_cb);
2754 break;
2755 default:
2756 /* Cannot have more than two IPv6 interfaces */
2757 ql4_printk(KERN_ERR, ha,
2758 "Invalid IPv6 iface number = %d\n",
2759 iface_param->iface_num);
2760 break;
2762 break;
2763 default:
2764 ql4_printk(KERN_ERR, ha,
2765 "Invalid iface type\n");
2766 break;
2768 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
2769 qla4xxx_set_iscsi_param(ha, iface_param,
2770 init_fw_cb);
2771 } else {
2772 continue;
2776 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
2778 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
2779 sizeof(struct addr_ctrl_blk),
2780 FLASH_OPT_RMW_COMMIT);
2781 if (rval != QLA_SUCCESS) {
2782 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
2783 __func__);
2784 rval = -EIO;
2785 goto exit_init_fw_cb;
2788 rval = qla4xxx_disable_acb(ha);
2789 if (rval != QLA_SUCCESS) {
2790 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
2791 __func__);
2792 rval = -EIO;
2793 goto exit_init_fw_cb;
2796 wait_for_completion_timeout(&ha->disable_acb_comp,
2797 DISABLE_ACB_TOV * HZ);
2799 qla4xxx_initcb_to_acb(init_fw_cb);
2801 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
2802 if (rval != QLA_SUCCESS) {
2803 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
2804 __func__);
2805 rval = -EIO;
2806 goto exit_init_fw_cb;
2809 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2810 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
2811 init_fw_cb_dma);
2813 exit_init_fw_cb:
2814 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
2815 init_fw_cb, init_fw_cb_dma);
2817 return rval;
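/*
 * qla4xxx_session_get_param - sysfs reads of session parameters
 *
 * CHAP index parameters are resolved through the adapter's CHAP table;
 * for flash DDBs the username/password are populated from the CHAP entry
 * on demand before deferring to iscsi_session_get_param().
 */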
2820 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
2821 enum iscsi_param param, char *buf)
2823 struct iscsi_session *sess = cls_sess->dd_data;
2824 struct ddb_entry *ddb_entry = sess->dd_data;
2825 struct scsi_qla_host *ha = ddb_entry->ha;
2826 struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
2827 struct ql4_chap_table chap_tbl;
2828 int rval, len;
2829 uint16_t idx;
2831 memset(&chap_tbl, 0, sizeof(chap_tbl));
2832 switch (param) {
2833 case ISCSI_PARAM_CHAP_IN_IDX:
2834 rval = qla4xxx_get_chap_index(ha, sess->username_in,
2835 sess->password_in, BIDI_CHAP,
2836 &idx);
2837 if (rval)
2838 len = sprintf(buf, "\n");
2839 else
2840 len = sprintf(buf, "%hu\n", idx);
2841 break;
2842 case ISCSI_PARAM_CHAP_OUT_IDX:
2843 if (ddb_entry->ddb_type == FLASH_DDB) {
2844 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2845 idx = ddb_entry->chap_tbl_idx;
2846 rval = QLA_SUCCESS;
2847 } else {
2848 rval = QLA_ERROR;
2850 } else {
2851 rval = qla4xxx_get_chap_index(ha, sess->username,
2852 sess->password,
2853 LOCAL_CHAP, &idx);
2855 if (rval)
2856 len = sprintf(buf, "\n");
2857 else
2858 len = sprintf(buf, "%hu\n", idx);
2859 break;
2860 case ISCSI_PARAM_USERNAME:
2861 case ISCSI_PARAM_PASSWORD:
2862 /* First, populate session username and password for FLASH DDB,
2863 * if not already done. This happens when session login fails
2864 * for a FLASH DDB.
2865 */
2866 if (ddb_entry->ddb_type == FLASH_DDB &&
2867 ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
2868 !sess->username && !sess->password) {
2869 idx = ddb_entry->chap_tbl_idx;
2870 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2871 chap_tbl.secret,
2872 idx);
2873 if (!rval) {
2874 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2875 (char *)chap_tbl.name,
2876 strlen((char *)chap_tbl.name));
2877 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2878 (char *)chap_tbl.secret,
2879 chap_tbl.secret_len);
2882 /* allow fall-through */
2883 default:
2884 return iscsi_session_get_param(cls_sess, param, buf);
2887 return len;
2890 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
2891 enum iscsi_param param, char *buf)
2893 struct iscsi_conn *conn;
2894 struct qla_conn *qla_conn;
2895 struct sockaddr *dst_addr;
2897 conn = cls_conn->dd_data;
2898 qla_conn = conn->dd_data;
2899 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
2901 switch (param) {
2902 case ISCSI_PARAM_CONN_PORT:
2903 case ISCSI_PARAM_CONN_ADDRESS:
2904 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2905 dst_addr, param, buf);
2906 default:
2907 return iscsi_conn_get_param(cls_conn, param, buf);
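/*
 * qla4xxx_get_ddb_index - reserve a free device database (DDB) index
 *
 * Finds a free bit in the driver's DDB index map, claims it, and requests
 * the entry from the firmware; if the firmware rejects that index, the
 * next free one is tried.
 */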
2911 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
2913 uint32_t mbx_sts = 0;
2914 uint16_t tmp_ddb_index;
2915 int ret;
2917 get_ddb_index:
2918 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
2920 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
2921 DEBUG2(ql4_printk(KERN_INFO, ha,
2922 "Free DDB index not available\n"));
2923 ret = QLA_ERROR;
2924 goto exit_get_ddb_index;
2927 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
2928 goto get_ddb_index;
2930 DEBUG2(ql4_printk(KERN_INFO, ha,
2931 "Found a free DDB index at %d\n", tmp_ddb_index));
2932 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
2933 if (ret == QLA_ERROR) {
2934 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
2935 ql4_printk(KERN_INFO, ha,
2936 "DDB index = %d not available, trying next\n",
2937 tmp_ddb_index);
2938 goto get_ddb_index;
2940 DEBUG2(ql4_printk(KERN_INFO, ha,
2941 "Free FW DDB not available\n"));
2944 *ddb_index = tmp_ddb_index;
2946 exit_get_ddb_index:
2947 return ret;
2950 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
2951 struct ddb_entry *ddb_entry,
2952 char *existing_ipaddr,
2953 char *user_ipaddr)
2955 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
2956 char formatted_ipaddr[DDB_IPADDR_LEN];
2957 int status = QLA_SUCCESS, ret = 0;
2959 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
2960 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2961 '\0', NULL);
2962 if (ret == 0) {
2963 status = QLA_ERROR;
2964 goto out_match;
2966 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
2967 } else {
2968 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2969 '\0', NULL);
2970 if (ret == 0) {
2971 status = QLA_ERROR;
2972 goto out_match;
2974 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
2977 if (strcmp(existing_ipaddr, formatted_ipaddr))
2978 status = QLA_ERROR;
2980 out_match:
2981 return status;
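/*
 * qla4xxx_match_fwdb_session - look for an existing flash DDB session
 *
 * Returns QLA_SUCCESS if a flash DDB already has the same target IQN,
 * portal address and port as the connection being started, so that a
 * duplicate login can be avoided.
 */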
2984 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
2985 struct iscsi_cls_conn *cls_conn)
2987 int idx = 0, max_ddbs, rval;
2988 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2989 struct iscsi_session *sess, *existing_sess;
2990 struct iscsi_conn *conn, *existing_conn;
2991 struct ddb_entry *ddb_entry;
2993 sess = cls_sess->dd_data;
2994 conn = cls_conn->dd_data;
2996 if (sess->targetname == NULL ||
2997 conn->persistent_address == NULL ||
2998 conn->persistent_port == 0)
2999 return QLA_ERROR;
3001 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3002 MAX_DEV_DB_ENTRIES;
3004 for (idx = 0; idx < max_ddbs; idx++) {
3005 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
3006 if (ddb_entry == NULL)
3007 continue;
3009 if (ddb_entry->ddb_type != FLASH_DDB)
3010 continue;
3012 existing_sess = ddb_entry->sess->dd_data;
3013 existing_conn = ddb_entry->conn->dd_data;
3015 if (existing_sess->targetname == NULL ||
3016 existing_conn->persistent_address == NULL ||
3017 existing_conn->persistent_port == 0)
3018 continue;
3020 DEBUG2(ql4_printk(KERN_INFO, ha,
3021 "IQN = %s User IQN = %s\n",
3022 existing_sess->targetname,
3023 sess->targetname));
3025 DEBUG2(ql4_printk(KERN_INFO, ha,
3026 "IP = %s User IP = %s\n",
3027 existing_conn->persistent_address,
3028 conn->persistent_address));
3030 DEBUG2(ql4_printk(KERN_INFO, ha,
3031 "Port = %d User Port = %d\n",
3032 existing_conn->persistent_port,
3033 conn->persistent_port));
3035 if (strcmp(existing_sess->targetname, sess->targetname))
3036 continue;
3037 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
3038 existing_conn->persistent_address,
3039 conn->persistent_address);
3040 if (rval == QLA_ERROR)
3041 continue;
3042 if (existing_conn->persistent_port != conn->persistent_port)
3043 continue;
3044 break;
3047 if (idx == max_ddbs)
3048 return QLA_ERROR;
3050 DEBUG2(ql4_printk(KERN_INFO, ha,
3051 "Match found in fwdb sessions\n"));
3052 return QLA_SUCCESS;
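/*
 * qla4xxx_session_create - allocate a firmware DDB and libiscsi session
 *
 * Reserves a free DDB index, sets up the iscsi_cls_session with a
 * ddb_entry as its private data, and registers the entry in the
 * adapter's fw_ddb_index_map.
 */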
3055 static struct iscsi_cls_session *
3056 qla4xxx_session_create(struct iscsi_endpoint *ep,
3057 uint16_t cmds_max, uint16_t qdepth,
3058 uint32_t initial_cmdsn)
3060 struct iscsi_cls_session *cls_sess;
3061 struct scsi_qla_host *ha;
3062 struct qla_endpoint *qla_ep;
3063 struct ddb_entry *ddb_entry;
3064 uint16_t ddb_index;
3065 struct iscsi_session *sess;
3066 struct sockaddr *dst_addr;
3067 int ret;
3069 if (!ep) {
3070 printk(KERN_ERR "qla4xxx: missing ep.\n");
3071 return NULL;
3074 qla_ep = ep->dd_data;
3075 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
3076 ha = to_qla_host(qla_ep->host);
3077 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3078 ha->host_no));
3080 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
3081 if (ret == QLA_ERROR)
3082 return NULL;
3084 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
3085 cmds_max, sizeof(struct ddb_entry),
3086 sizeof(struct ql4_task_data),
3087 initial_cmdsn, ddb_index);
3088 if (!cls_sess)
3089 return NULL;
3091 sess = cls_sess->dd_data;
3092 ddb_entry = sess->dd_data;
3093 ddb_entry->fw_ddb_index = ddb_index;
3094 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
3095 ddb_entry->ha = ha;
3096 ddb_entry->sess = cls_sess;
3097 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
3098 ddb_entry->ddb_change = qla4xxx_ddb_change;
3099 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
3100 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
3101 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
3102 ha->tot_ddbs++;
3104 return cls_sess;
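/*
 * qla4xxx_session_destroy - tear down a session created above
 *
 * Polls the firmware DDB state for up to LOGOUT_TOV seconds until the
 * connection is no longer active, then clears the DDB, frees the
 * ddb_entry and tears down the libiscsi session.
 */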
3107 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
3109 struct iscsi_session *sess;
3110 struct ddb_entry *ddb_entry;
3111 struct scsi_qla_host *ha;
3112 unsigned long flags, wtime;
3113 struct dev_db_entry *fw_ddb_entry = NULL;
3114 dma_addr_t fw_ddb_entry_dma;
3115 uint32_t ddb_state;
3116 int ret;
3118 sess = cls_sess->dd_data;
3119 ddb_entry = sess->dd_data;
3120 ha = ddb_entry->ha;
3121 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3122 ha->host_no));
3124 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3125 &fw_ddb_entry_dma, GFP_KERNEL);
3126 if (!fw_ddb_entry) {
3127 ql4_printk(KERN_ERR, ha,
3128 "%s: Unable to allocate dma buffer\n", __func__);
3129 goto destroy_session;
3132 wtime = jiffies + (HZ * LOGOUT_TOV);
3133 do {
3134 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
3135 fw_ddb_entry, fw_ddb_entry_dma,
3136 NULL, NULL, &ddb_state, NULL,
3137 NULL, NULL);
3138 if (ret == QLA_ERROR)
3139 goto destroy_session;
3141 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
3142 (ddb_state == DDB_DS_SESSION_FAILED))
3143 goto destroy_session;
3145 schedule_timeout_uninterruptible(HZ);
3146 } while ((time_after(wtime, jiffies)));
3148 destroy_session:
3149 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
3150 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
3151 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
3152 spin_lock_irqsave(&ha->hardware_lock, flags);
3153 qla4xxx_free_ddb(ha, ddb_entry);
3154 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3156 iscsi_session_teardown(cls_sess);
3158 if (fw_ddb_entry)
3159 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3160 fw_ddb_entry, fw_ddb_entry_dma);
3163 static struct iscsi_cls_conn *
3164 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
3166 struct iscsi_cls_conn *cls_conn;
3167 struct iscsi_session *sess;
3168 struct ddb_entry *ddb_entry;
3169 struct scsi_qla_host *ha;
3171 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
3172 conn_idx);
3173 if (!cls_conn) {
3174 pr_info("%s: Cannot create connection for conn_idx = %u\n",
3175 __func__, conn_idx);
3176 return NULL;
3179 sess = cls_sess->dd_data;
3180 ddb_entry = sess->dd_data;
3181 ddb_entry->conn = cls_conn;
3183 ha = ddb_entry->ha;
3184 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
3185 conn_idx));
3186 return cls_conn;
3189 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3190 struct iscsi_cls_conn *cls_conn,
3191 uint64_t transport_fd, int is_leading)
3193 struct iscsi_conn *conn;
3194 struct qla_conn *qla_conn;
3195 struct iscsi_endpoint *ep;
3196 struct ddb_entry *ddb_entry;
3197 struct scsi_qla_host *ha;
3198 struct iscsi_session *sess;
3200 sess = cls_session->dd_data;
3201 ddb_entry = sess->dd_data;
3202 ha = ddb_entry->ha;
3204 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3205 cls_session->sid, cls_conn->cid));
3207 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3208 return -EINVAL;
3209 ep = iscsi_lookup_endpoint(transport_fd);
3210 conn = cls_conn->dd_data;
3211 qla_conn = conn->dd_data;
3212 qla_conn->qla_ep = ep->dd_data;
3213 return 0;
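/*
 * qla4xxx_conn_start - program the DDB and open the connection
 *
 * Skips the login (returns -EEXIST) if a matching flash DDB session is
 * already present in the firmware; otherwise sets the DDB entry
 * parameters and issues a connection open for the session's DDB index.
 */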
3216 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
3218 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3219 struct iscsi_session *sess;
3220 struct ddb_entry *ddb_entry;
3221 struct scsi_qla_host *ha;
3222 struct dev_db_entry *fw_ddb_entry = NULL;
3223 dma_addr_t fw_ddb_entry_dma;
3224 uint32_t mbx_sts = 0;
3225 int ret = 0;
3226 int status = QLA_SUCCESS;
3228 sess = cls_sess->dd_data;
3229 ddb_entry = sess->dd_data;
3230 ha = ddb_entry->ha;
3231 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3232 cls_sess->sid, cls_conn->cid));
3234 /* Check whether a matching FW DDB already exists; if so, do not
3235 * log in to this target, as that could cause the target to log out
3236 * the previous connection.
3237 */
3238 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
3239 if (ret == QLA_SUCCESS) {
3240 ql4_printk(KERN_INFO, ha,
3241 "Session already exists in FW.\n");
3242 ret = -EEXIST;
3243 goto exit_conn_start;
3246 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3247 &fw_ddb_entry_dma, GFP_KERNEL);
3248 if (!fw_ddb_entry) {
3249 ql4_printk(KERN_ERR, ha,
3250 "%s: Unable to allocate dma buffer\n", __func__);
3251 ret = -ENOMEM;
3252 goto exit_conn_start;
3255 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
3256 if (ret) {
3257 /* If iscsid is stopped and restarted, there is no need to set the
3258 * params again, since the DDB state will already be active and the
3259 * FW does not allow a set ddb on an active session.
3260 */
3262 if (mbx_sts)
3263 if (ddb_entry->fw_ddb_device_state ==
3264 DDB_DS_SESSION_ACTIVE) {
3265 ddb_entry->unblock_sess(ddb_entry->sess);
3266 goto exit_set_param;
3269 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
3270 __func__, ddb_entry->fw_ddb_index);
3271 goto exit_conn_start;
3274 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
3275 if (status == QLA_ERROR) {
3276 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
3277 sess->targetname);
3278 ret = -EINVAL;
3279 goto exit_conn_start;
3282 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
3283 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
3285 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
3286 ddb_entry->fw_ddb_device_state));
3288 exit_set_param:
3289 ret = 0;
3291 exit_conn_start:
3292 if (fw_ddb_entry)
3293 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3294 fw_ddb_entry, fw_ddb_entry_dma);
3295 return ret;
3298 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
3300 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3301 struct iscsi_session *sess;
3302 struct scsi_qla_host *ha;
3303 struct ddb_entry *ddb_entry;
3304 int options;
3306 sess = cls_sess->dd_data;
3307 ddb_entry = sess->dd_data;
3308 ha = ddb_entry->ha;
3309 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
3310 cls_conn->cid));
3312 options = LOGOUT_OPTION_CLOSE_SESSION;
3313 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
3314 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
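/*
 * qla4xxx_task_work - completion handler for passthrough iSCSI PDUs
 *
 * Runs from the work queue once the firmware returns a passthru status;
 * on PASSTHRU_STATUS_COMPLETE it restores the task's itt in the response
 * header and hands the PDU back to libiscsi via iscsi_complete_pdu().
 */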
3317 static void qla4xxx_task_work(struct work_struct *wdata)
3319 struct ql4_task_data *task_data;
3320 struct scsi_qla_host *ha;
3321 struct passthru_status *sts;
3322 struct iscsi_task *task;
3323 struct iscsi_hdr *hdr;
3324 uint8_t *data;
3325 uint32_t data_len;
3326 struct iscsi_conn *conn;
3327 int hdr_len;
3328 itt_t itt;
3330 task_data = container_of(wdata, struct ql4_task_data, task_work);
3331 ha = task_data->ha;
3332 task = task_data->task;
3333 sts = &task_data->sts;
3334 hdr_len = sizeof(struct iscsi_hdr);
3336 DEBUG3(printk(KERN_INFO "Status returned\n"));
3337 DEBUG3(qla4xxx_dump_buffer(sts, 64));
3338 DEBUG3(printk(KERN_INFO "Response buffer"));
3339 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
3341 conn = task->conn;
3343 switch (sts->completionStatus) {
3344 case PASSTHRU_STATUS_COMPLETE:
3345 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
3346 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
3347 itt = sts->handle;
3348 hdr->itt = itt;
3349 data = task_data->resp_buffer + hdr_len;
3350 data_len = task_data->resp_len - hdr_len;
3351 iscsi_complete_pdu(conn, hdr, data, data_len);
3352 break;
3353 default:
3354 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
3355 sts->completionStatus);
3356 break;
3358 return;
3361 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3363 struct ql4_task_data *task_data;
3364 struct iscsi_session *sess;
3365 struct ddb_entry *ddb_entry;
3366 struct scsi_qla_host *ha;
3367 int hdr_len;
3369 sess = task->conn->session;
3370 ddb_entry = sess->dd_data;
3371 ha = ddb_entry->ha;
3372 task_data = task->dd_data;
3373 memset(task_data, 0, sizeof(struct ql4_task_data));
3375 if (task->sc) {
3376 ql4_printk(KERN_INFO, ha,
3377 "%s: SCSI Commands not implemented\n", __func__);
3378 return -EINVAL;
3381 hdr_len = sizeof(struct iscsi_hdr);
3382 task_data->ha = ha;
3383 task_data->task = task;
3385 if (task->data_count) {
3386 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
3387 task->data_count,
3388 PCI_DMA_TODEVICE);
3391 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3392 __func__, task->conn->max_recv_dlength, hdr_len));
3394 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
3395 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
3396 task_data->resp_len,
3397 &task_data->resp_dma,
3398 GFP_ATOMIC);
3399 if (!task_data->resp_buffer)
3400 goto exit_alloc_pdu;
3402 task_data->req_len = task->data_count + hdr_len;
3403 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
3404 task_data->req_len,
3405 &task_data->req_dma,
3406 GFP_ATOMIC);
3407 if (!task_data->req_buffer)
3408 goto exit_alloc_pdu;
3410 task->hdr = task_data->req_buffer;
3412 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
3414 return 0;
3416 exit_alloc_pdu:
3417 if (task_data->resp_buffer)
3418 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3419 task_data->resp_buffer, task_data->resp_dma);
3421 if (task_data->req_buffer)
3422 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3423 task_data->req_buffer, task_data->req_dma);
3424 return -ENOMEM;
3427 static void qla4xxx_task_cleanup(struct iscsi_task *task)
3429 struct ql4_task_data *task_data;
3430 struct iscsi_session *sess;
3431 struct ddb_entry *ddb_entry;
3432 struct scsi_qla_host *ha;
3433 int hdr_len;
3435 hdr_len = sizeof(struct iscsi_hdr);
3436 sess = task->conn->session;
3437 ddb_entry = sess->dd_data;
3438 ha = ddb_entry->ha;
3439 task_data = task->dd_data;
3441 if (task->data_count) {
3442 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
3443 task->data_count, PCI_DMA_TODEVICE);
3446 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
3447 __func__, task->conn->max_recv_dlength, hdr_len));
3449 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3450 task_data->resp_buffer, task_data->resp_dma);
3451 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3452 task_data->req_buffer, task_data->req_dma);
3453 return;
3456 static int qla4xxx_task_xmit(struct iscsi_task *task)
3458 struct scsi_cmnd *sc = task->sc;
3459 struct iscsi_session *sess = task->conn->session;
3460 struct ddb_entry *ddb_entry = sess->dd_data;
3461 struct scsi_qla_host *ha = ddb_entry->ha;
3463 if (!sc)
3464 return qla4xxx_send_passthru0(task);
3466 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
3467 __func__);
3468 return -ENOSYS;
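/*
 * qla4xxx_copy_from_fwddb_param - import a firmware DDB into flash
 * session/connection objects
 *
 * Decodes the option bits and copies the negotiated parameters and
 * addresses from the dev_db_entry. Returns 0 on success or a negative
 * errno, e.g. -ENOMEM when an address buffer cannot be allocated or a
 * failure from iscsi_switch_str_param().
 */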
3471 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
3472 struct iscsi_bus_flash_conn *conn,
3473 struct dev_db_entry *fw_ddb_entry)
3475 unsigned long options = 0;
3476 int rc = 0;
3478 options = le16_to_cpu(fw_ddb_entry->options);
3479 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3480 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3481 rc = iscsi_switch_str_param(&sess->portal_type,
3482 PORTAL_TYPE_IPV6);
3483 if (rc)
3484 goto exit_copy;
3485 } else {
3486 rc = iscsi_switch_str_param(&sess->portal_type,
3487 PORTAL_TYPE_IPV4);
3488 if (rc)
3489 goto exit_copy;
3492 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3493 &options);
3494 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3495 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
3497 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3498 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3499 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3500 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3501 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3502 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3503 &options);
3504 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3505 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3506 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
3507 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3508 &options);
3509 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3510 sess->discovery_auth_optional =
3511 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3512 if (test_bit(ISCSIOPT_ERL1, &options))
3513 sess->erl |= BIT_1;
3514 if (test_bit(ISCSIOPT_ERL0, &options))
3515 sess->erl |= BIT_0;
3517 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3518 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3519 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3520 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3521 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3522 conn->tcp_timer_scale |= BIT_3;
3523 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3524 conn->tcp_timer_scale |= BIT_2;
3525 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3526 conn->tcp_timer_scale |= BIT_1;
3528 conn->tcp_timer_scale >>= 1;
3529 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3531 options = le16_to_cpu(fw_ddb_entry->ip_options);
3532 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3534 conn->max_recv_dlength = BYTE_UNITS *
3535 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3536 conn->max_xmit_dlength = BYTE_UNITS *
3537 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3538 sess->first_burst = BYTE_UNITS *
3539 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3540 sess->max_burst = BYTE_UNITS *
3541 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3542 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3543 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3544 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3545 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3546 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3547 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3548 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3549 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
3550 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
3551 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3552 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3553 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3554 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
3555 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
3556 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3557 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3559 sess->default_taskmgmt_timeout =
3560 le16_to_cpu(fw_ddb_entry->def_timeout);
3561 conn->port = le16_to_cpu(fw_ddb_entry->port);
3563 options = le16_to_cpu(fw_ddb_entry->options);
3564 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3565 if (!conn->ipaddress) {
3566 rc = -ENOMEM;
3567 goto exit_copy;
3570 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3571 if (!conn->redirect_ipaddr) {
3572 rc = -ENOMEM;
3573 goto exit_copy;
3576 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
3577 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
3579 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3580 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
3582 conn->link_local_ipv6_addr = kmemdup(
3583 fw_ddb_entry->link_local_ipv6_addr,
3584 IPv6_ADDR_LEN, GFP_KERNEL);
3585 if (!conn->link_local_ipv6_addr) {
3586 rc = -ENOMEM;
3587 goto exit_copy;
3589 } else {
3590 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3593 if (fw_ddb_entry->iscsi_name[0]) {
3594 rc = iscsi_switch_str_param(&sess->targetname,
3595 (char *)fw_ddb_entry->iscsi_name);
3596 if (rc)
3597 goto exit_copy;
3600 if (fw_ddb_entry->iscsi_alias[0]) {
3601 rc = iscsi_switch_str_param(&sess->targetalias,
3602 (char *)fw_ddb_entry->iscsi_alias);
3603 if (rc)
3604 goto exit_copy;
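/*
 * COPY_ISID is presumed to copy the 6-byte ISID from the firmware DDB
 * entry into the session, handling any byte-order adjustment; the macro
 * is defined elsewhere in the driver headers.
 */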
3607 COPY_ISID(sess->isid, fw_ddb_entry->isid);
3609 exit_copy:
3610 return rc;
3613 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
3614 struct iscsi_bus_flash_conn *conn,
3615 struct dev_db_entry *fw_ddb_entry)
3617 uint16_t options;
3618 int rc = 0;
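/*
 * SET_BITVAL() is assumed to set the given bit in 'options' when the
 * first argument is non-zero and to clear it otherwise, roughly:
 *
 *	if (val)
 *		options |= bit;
 *	else
 *		options &= ~bit;
 *
 * so the sequence below rebuilds the firmware option words from the
 * flash session/connection state before writing them back into the
 * DDB entry.
 */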
3620 options = le16_to_cpu(fw_ddb_entry->options);
3621 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
3622 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3623 options |= BIT_8;
3624 else
3625 options &= ~BIT_8;
3627 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
3628 SET_BITVAL(sess->discovery_sess, options, BIT_4);
3629 SET_BITVAL(sess->entry_state, options, BIT_3);
3630 fw_ddb_entry->options = cpu_to_le16(options);
3632 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3633 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
3634 SET_BITVAL(conn->datadgst_en, options, BIT_12);
3635 SET_BITVAL(sess->imm_data_en, options, BIT_11);
3636 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
3637 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
3638 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
3639 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
3640 SET_BITVAL(conn->snack_req_en, options, BIT_6);
3641 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
3642 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
3643 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
3644 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
3645 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
3646 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
3648 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3649 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
3650 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
3651 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
3652 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
3653 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
3654 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
3655 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
3656 fw_ddb_entry->tcp_options = cpu_to_le16(options);
3658 options = le16_to_cpu(fw_ddb_entry->ip_options);
3659 SET_BITVAL(conn->fragment_disable, options, BIT_4);
3660 fw_ddb_entry->ip_options = cpu_to_le16(options);
3662 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
3663 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
3664 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
3665 fw_ddb_entry->iscsi_max_snd_data_seg_len =
3666 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
3667 fw_ddb_entry->iscsi_first_burst_len =
3668 cpu_to_le16(sess->first_burst / BYTE_UNITS);
3669 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
3670 BYTE_UNITS);
3671 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
3672 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
3673 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
3674 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
3675 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
3676 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
3677 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
3678 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
3679 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
3680 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
3681 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
3682 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
3683 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
3684 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
3685 fw_ddb_entry->port = cpu_to_le16(conn->port);
3686 fw_ddb_entry->def_timeout =
3687 cpu_to_le16(sess->default_taskmgmt_timeout);
3689 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3690 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
3691 else
3692 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
3694 if (conn->ipaddress)
3695 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
3696 sizeof(fw_ddb_entry->ip_addr));
3698 if (conn->redirect_ipaddr)
3699 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
3700 sizeof(fw_ddb_entry->tgt_addr));
3702 if (conn->link_local_ipv6_addr)
3703 memcpy(fw_ddb_entry->link_local_ipv6_addr,
3704 conn->link_local_ipv6_addr,
3705 sizeof(fw_ddb_entry->link_local_ipv6_addr));
3707 if (sess->targetname)
3708 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
3709 sizeof(fw_ddb_entry->iscsi_name));
3711 if (sess->targetalias)
3712 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
3713 sizeof(fw_ddb_entry->iscsi_alias));
3715 COPY_ISID(fw_ddb_entry->isid, sess->isid);
3717 return rc;
3720 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
3721 struct iscsi_session *sess,
3722 struct dev_db_entry *fw_ddb_entry)
3724 unsigned long options = 0;
3725 uint16_t ddb_link;
3726 uint16_t disc_parent;
3727 char ip_addr[DDB_IPADDR_LEN];
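/*
 * This mirrors qla4xxx_copy_from_fwddb_param() but targets the live
 * iscsi_session/iscsi_conn used by the iSCSI transport rather than the
 * flash (sysfs) objects, so no buffer allocations are needed here.
 */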
3729 options = le16_to_cpu(fw_ddb_entry->options);
3730 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3731 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3732 &options);
3733 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3735 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3736 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3737 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3738 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3739 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3740 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3741 &options);
3742 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3743 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3744 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3745 &options);
3746 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3747 sess->discovery_auth_optional =
3748 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3749 if (test_bit(ISCSIOPT_ERL1, &options))
3750 sess->erl |= BIT_1;
3751 if (test_bit(ISCSIOPT_ERL0, &options))
3752 sess->erl |= BIT_0;
3754 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3755 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3756 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3757 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3758 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3759 conn->tcp_timer_scale |= BIT_3;
3760 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3761 conn->tcp_timer_scale |= BIT_2;
3762 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3763 conn->tcp_timer_scale |= BIT_1;
3765 conn->tcp_timer_scale >>= 1;
3766 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3768 options = le16_to_cpu(fw_ddb_entry->ip_options);
3769 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3771 conn->max_recv_dlength = BYTE_UNITS *
3772 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3773 conn->max_xmit_dlength = BYTE_UNITS *
3774 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3775 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3776 sess->first_burst = BYTE_UNITS *
3777 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3778 sess->max_burst = BYTE_UNITS *
3779 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3780 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3781 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3782 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3783 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3784 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3785 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3786 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3787 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
3788 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3789 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3790 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3791 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3792 COPY_ISID(sess->isid, fw_ddb_entry->isid);
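/*
 * Translate the firmware ddb_link value into a discovery parent type:
 * the reserved iSNS index maps to the iSNS type, a valid DDB index
 * implies a SendTargets discovery parent, and "no link" or out-of-range
 * values are reported as unknown.
 */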
3794 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
3795 if (ddb_link == DDB_ISNS)
3796 disc_parent = ISCSI_DISC_PARENT_ISNS;
3797 else if (ddb_link == DDB_NO_LINK)
3798 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3799 else if (ddb_link < MAX_DDB_ENTRIES)
3800 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
3801 else
3802 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3804 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
3805 iscsi_get_discovery_parent_name(disc_parent), 0);
3807 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
3808 (char *)fw_ddb_entry->iscsi_alias, 0);
3810 options = le16_to_cpu(fw_ddb_entry->options);
3811 if (options & DDB_OPT_IPV6_DEVICE) {
3812 memset(ip_addr, 0, sizeof(ip_addr));
3813 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
3814 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
3815 (char *)ip_addr, 0);
3819 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
3820 struct dev_db_entry *fw_ddb_entry,
3821 struct iscsi_cls_session *cls_sess,
3822 struct iscsi_cls_conn *cls_conn)
3824 int buflen = 0;
3825 struct iscsi_session *sess;
3826 struct ddb_entry *ddb_entry;
3827 struct ql4_chap_table chap_tbl;
3828 struct iscsi_conn *conn;
3829 char ip_addr[DDB_IPADDR_LEN];
3830 uint16_t options = 0;
3832 sess = cls_sess->dd_data;
3833 ddb_entry = sess->dd_data;
3834 conn = cls_conn->dd_data;
3835 memset(&chap_tbl, 0, sizeof(chap_tbl));
3837 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3839 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3841 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
3842 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
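/*
 * Derive the portal type and a printable portal address from the DDB
 * options: IPv6 targets are formatted with %pI6, IPv4 targets with
 * %pI4, and the result is pushed to the transport as the persistent
 * address below.
 */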
3844 memset(ip_addr, 0, sizeof(ip_addr));
3845 options = le16_to_cpu(fw_ddb_entry->options);
3846 if (options & DDB_OPT_IPV6_DEVICE) {
3847 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
3849 memset(ip_addr, 0, sizeof(ip_addr));
3850 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
3851 } else {
3852 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
3853 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
3856 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
3857 (char *)ip_addr, buflen);
3858 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
3859 (char *)fw_ddb_entry->iscsi_name, buflen);
3860 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
3861 (char *)ha->name_string, buflen);
3863 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
3864 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
3865 chap_tbl.secret,
3866 ddb_entry->chap_tbl_idx)) {
3867 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
3868 (char *)chap_tbl.name,
3869 strlen((char *)chap_tbl.name));
3870 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
3871 (char *)chap_tbl.secret,
3872 chap_tbl.secret_len);
3877 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
3878 struct ddb_entry *ddb_entry)
3880 struct iscsi_cls_session *cls_sess;
3881 struct iscsi_cls_conn *cls_conn;
3882 uint32_t ddb_state;
3883 dma_addr_t fw_ddb_entry_dma;
3884 struct dev_db_entry *fw_ddb_entry;
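/*
 * Fetch a fresh copy of the firmware DDB entry into a DMA-coherent
 * buffer and copy its parameters into the iSCSI class session and
 * connection; the buffer is freed on every exit path.
 */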
3886 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3887 &fw_ddb_entry_dma, GFP_KERNEL);
3888 if (!fw_ddb_entry) {
3889 ql4_printk(KERN_ERR, ha,
3890 "%s: Unable to allocate dma buffer\n", __func__);
3891 goto exit_session_conn_fwddb_param;
3894 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3895 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3896 NULL, NULL, NULL) == QLA_ERROR) {
3897 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3898 "get_ddb_entry for fw_ddb_index %d\n",
3899 ha->host_no, __func__,
3900 ddb_entry->fw_ddb_index));
3901 goto exit_session_conn_fwddb_param;
3904 cls_sess = ddb_entry->sess;
3906 cls_conn = ddb_entry->conn;
3908 /* Update params */
3909 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
3911 exit_session_conn_fwddb_param:
3912 if (fw_ddb_entry)
3913 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3914 fw_ddb_entry, fw_ddb_entry_dma);
3917 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
3918 struct ddb_entry *ddb_entry)
3920 struct iscsi_cls_session *cls_sess;
3921 struct iscsi_cls_conn *cls_conn;
3922 struct iscsi_session *sess;
3923 struct iscsi_conn *conn;
3924 uint32_t ddb_state;
3925 dma_addr_t fw_ddb_entry_dma;
3926 struct dev_db_entry *fw_ddb_entry;
3928 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3929 &fw_ddb_entry_dma, GFP_KERNEL);
3930 if (!fw_ddb_entry) {
3931 ql4_printk(KERN_ERR, ha,
3932 "%s: Unable to allocate dma buffer\n", __func__);
3933 goto exit_session_conn_param;
3936 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3937 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3938 NULL, NULL, NULL) == QLA_ERROR) {
3939 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3940 "get_ddb_entry for fw_ddb_index %d\n",
3941 ha->host_no, __func__,
3942 ddb_entry->fw_ddb_index));
3943 goto exit_session_conn_param;
3946 cls_sess = ddb_entry->sess;
3947 sess = cls_sess->dd_data;
3949 cls_conn = ddb_entry->conn;
3950 conn = cls_conn->dd_data;
3952 /* Update timers after login */
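/*
 * Use the firmware default timeout as the relogin timeout only when it
 * falls between LOGIN_TOV and LOGIN_TOV * 10 (exclusive); otherwise
 * fall back to LOGIN_TOV.
 */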
3953 ddb_entry->default_relogin_timeout =
3954 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
3955 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
3956 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
3957 ddb_entry->default_time2wait =
3958 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3960 /* Update params */
3961 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3962 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3964 memcpy(sess->initiatorname, ha->name_string,
3965 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
3967 exit_session_conn_param:
3968 if (fw_ddb_entry)
3969 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3970 fw_ddb_entry, fw_ddb_entry_dma);
3974 * Timer routines
3977 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
3978 unsigned long interval)
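/*
 * Arm the per-adapter timer using the legacy timer API: 'func' is
 * invoked with the ha pointer after 'interval' seconds, and the handler
 * is expected to re-arm the timer itself, as qla4xxx_timer() below does
 * via mod_timer().
 */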
3980 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
3981 __func__, ha->host->host_no));
3982 init_timer(&ha->timer);
3983 ha->timer.expires = jiffies + interval * HZ;
3984 ha->timer.data = (unsigned long)ha;
3985 ha->timer.function = (void (*)(unsigned long))func;
3986 add_timer(&ha->timer);
3987 ha->timer_active = 1;
3990 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
3992 del_timer_sync(&ha->timer);
3993 ha->timer_active = 0;
3996 /**
3997 * qla4xxx_mark_device_missing - blocks the session
3998 * @cls_session: Pointer to the session to be blocked
4001 * This routine marks a device missing by blocking its session.
4003 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
4005 iscsi_block_session(cls_session);
4009 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
4010 * @ha: Pointer to host adapter structure.
4012 * This routine marks all devices on the adapter as missing by blocking their sessions.
4014 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
4016 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
4019 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
4020 struct ddb_entry *ddb_entry,
4021 struct scsi_cmnd *cmd)
4023 struct srb *srb;
4025 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
4026 if (!srb)
4027 return srb;
4029 kref_init(&srb->srb_ref);
4030 srb->ha = ha;
4031 srb->ddb = ddb_entry;
4032 srb->cmd = cmd;
4033 srb->flags = 0;
4034 CMD_SP(cmd) = (void *)srb;
4036 return srb;
4039 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
4041 struct scsi_cmnd *cmd = srb->cmd;
4043 if (srb->flags & SRB_DMA_VALID) {
4044 scsi_dma_unmap(cmd);
4045 srb->flags &= ~SRB_DMA_VALID;
4047 CMD_SP(cmd) = NULL;
4050 void qla4xxx_srb_compl(struct kref *ref)
4052 struct srb *srb = container_of(ref, struct srb, srb_ref);
4053 struct scsi_cmnd *cmd = srb->cmd;
4054 struct scsi_qla_host *ha = srb->ha;
4056 qla4xxx_srb_free_dma(ha, srb);
4058 mempool_free(srb, ha->srb_mempool);
4060 cmd->scsi_done(cmd);
4064 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
4065 * @host: scsi host
4066 * @cmd: Pointer to Linux's SCSI command structure
4068 * Remarks:
4069 * This routine is invoked by Linux to send a SCSI command to the driver.
4070 * The mid-level driver tries to ensure that queuecommand never gets
4071 * invoked concurrently with itself or the interrupt handler (although
4072 * the interrupt handler may call this routine as part of request-
4073 * completion handling). Unfortunately, it sometimes calls the scheduler
4074 * in interrupt context, which is a big no-no.
4076 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
4078 struct scsi_qla_host *ha = to_qla_host(host);
4079 struct ddb_entry *ddb_entry = cmd->device->hostdata;
4080 struct iscsi_cls_session *sess = ddb_entry->sess;
4081 struct srb *srb;
4082 int rval;
4084 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4085 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
4086 cmd->result = DID_NO_CONNECT << 16;
4087 else
4088 cmd->result = DID_REQUEUE << 16;
4089 goto qc_fail_command;
4092 if (!sess) {
4093 cmd->result = DID_IMM_RETRY << 16;
4094 goto qc_fail_command;
4097 rval = iscsi_session_chkready(sess);
4098 if (rval) {
4099 cmd->result = rval;
4100 goto qc_fail_command;
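/*
 * While any reset, quiescence or link-down condition is pending, return
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues the command instead
 * of sending it to firmware that may be resetting.
 */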
4103 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4104 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4105 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4106 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4107 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4108 !test_bit(AF_ONLINE, &ha->flags) ||
4109 !test_bit(AF_LINK_UP, &ha->flags) ||
4110 test_bit(AF_LOOPBACK, &ha->flags) ||
4111 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
4112 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
4113 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
4114 goto qc_host_busy;
4116 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
4117 if (!srb)
4118 goto qc_host_busy;
4120 rval = qla4xxx_send_command_to_isp(ha, srb);
4121 if (rval != QLA_SUCCESS)
4122 goto qc_host_busy_free_sp;
4124 return 0;
4126 qc_host_busy_free_sp:
4127 qla4xxx_srb_free_dma(ha, srb);
4128 mempool_free(srb, ha->srb_mempool);
4130 qc_host_busy:
4131 return SCSI_MLQUEUE_HOST_BUSY;
4133 qc_fail_command:
4134 cmd->scsi_done(cmd);
4136 return 0;
4140 * qla4xxx_mem_free - frees memory allocated to adapter
4141 * @ha: Pointer to host adapter structure.
4143 * Frees memory previously allocated by qla4xxx_mem_alloc
4145 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
4147 if (ha->queues)
4148 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
4149 ha->queues_dma);
4151 if (ha->fw_dump)
4152 vfree(ha->fw_dump);
4154 ha->queues_len = 0;
4155 ha->queues = NULL;
4156 ha->queues_dma = 0;
4157 ha->request_ring = NULL;
4158 ha->request_dma = 0;
4159 ha->response_ring = NULL;
4160 ha->response_dma = 0;
4161 ha->shadow_regs = NULL;
4162 ha->shadow_regs_dma = 0;
4163 ha->fw_dump = NULL;
4164 ha->fw_dump_size = 0;
4166 /* Free srb pool. */
4167 if (ha->srb_mempool)
4168 mempool_destroy(ha->srb_mempool);
4170 ha->srb_mempool = NULL;
4172 if (ha->chap_dma_pool)
4173 dma_pool_destroy(ha->chap_dma_pool);
4175 if (ha->chap_list)
4176 vfree(ha->chap_list);
4177 ha->chap_list = NULL;
4179 if (ha->fw_ddb_dma_pool)
4180 dma_pool_destroy(ha->fw_ddb_dma_pool);
4182 /* release io space registers */
4183 if (is_qla8022(ha)) {
4184 if (ha->nx_pcibase)
4185 iounmap(
4186 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
4187 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4188 if (ha->nx_pcibase)
4189 iounmap(
4190 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
4191 } else if (ha->reg) {
4192 iounmap(ha->reg);
4195 if (ha->reset_tmplt.buff)
4196 vfree(ha->reset_tmplt.buff);
4198 pci_release_regions(ha->pdev);
4202 * qla4xxx_mem_alloc - allocates memory for use by adapter.
4203 * @ha: Pointer to host adapter structure
4205 * Allocates DMA memory for request and response queues. Also allocates memory
4206 * for srbs.
4208 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4210 unsigned long align;
4212 /* Allocate contiguous block of DMA memory for queues. */
4213 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4214 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
4215 sizeof(struct shadow_regs) +
4216 MEM_ALIGN_VALUE +
4217 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4218 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4219 &ha->queues_dma, GFP_KERNEL);
4220 if (ha->queues == NULL) {
4221 ql4_printk(KERN_WARNING, ha,
4222 "Memory Allocation failed - queues.\n");
4224 goto mem_alloc_error_exit;
4226 memset(ha->queues, 0, ha->queues_len);
4229 * As per RISC alignment requirements -- the bus-address must be a
4230 * multiple of the request-ring size (in bytes).
4232 align = 0;
4233 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
4234 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
4235 (MEM_ALIGN_VALUE - 1));
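/*
 * Carve the single DMA block into the request ring (at the aligned
 * offset), the response ring immediately after it, and the shadow
 * registers at the end.
 */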
4237 /* Update request and response queue pointers. */
4238 ha->request_dma = ha->queues_dma + align;
4239 ha->request_ring = (struct queue_entry *) (ha->queues + align);
4240 ha->response_dma = ha->queues_dma + align +
4241 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
4242 ha->response_ring = (struct queue_entry *) (ha->queues + align +
4243 (REQUEST_QUEUE_DEPTH *
4244 QUEUE_SIZE));
4245 ha->shadow_regs_dma = ha->queues_dma + align +
4246 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4247 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
4248 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
4249 (REQUEST_QUEUE_DEPTH *
4250 QUEUE_SIZE) +
4251 (RESPONSE_QUEUE_DEPTH *
4252 QUEUE_SIZE));
4254 /* Allocate memory for srb pool. */
4255 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
4256 mempool_free_slab, srb_cachep);
4257 if (ha->srb_mempool == NULL) {
4258 ql4_printk(KERN_WARNING, ha,
4259 "Memory Allocation failed - SRB Pool.\n");
4261 goto mem_alloc_error_exit;
4264 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
4265 CHAP_DMA_BLOCK_SIZE, 8, 0);
4267 if (ha->chap_dma_pool == NULL) {
4268 ql4_printk(KERN_WARNING, ha,
4269 "%s: chap_dma_pool allocation failed..\n", __func__);
4270 goto mem_alloc_error_exit;
4273 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
4274 DDB_DMA_BLOCK_SIZE, 8, 0);
4276 if (ha->fw_ddb_dma_pool == NULL) {
4277 ql4_printk(KERN_WARNING, ha,
4278 "%s: fw_ddb_dma_pool allocation failed..\n",
4279 __func__);
4280 goto mem_alloc_error_exit;
4283 return QLA_SUCCESS;
4285 mem_alloc_error_exit:
4286 qla4xxx_mem_free(ha);
4287 return QLA_ERROR;
4291 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
4292 * @ha: adapter block pointer.
4294 * Note: The caller should not hold the idc lock.
4296 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
4298 uint32_t temp, temp_state, temp_val;
4299 int status = QLA_SUCCESS;
4301 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
4303 temp_state = qla82xx_get_temp_state(temp);
4304 temp_val = qla82xx_get_temp_val(temp);
4306 if (temp_state == QLA82XX_TEMP_PANIC) {
4307 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
4308 " exceeds maximum allowed. Hardware has been shut"
4309 " down.\n", temp_val);
4310 status = QLA_ERROR;
4311 } else if (temp_state == QLA82XX_TEMP_WARN) {
4312 if (ha->temperature == QLA82XX_TEMP_NORMAL)
4313 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
4314 " degrees C exceeds operating range."
4315 " Immediate action needed.\n", temp_val);
4316 } else {
4317 if (ha->temperature == QLA82XX_TEMP_WARN)
4318 ql4_printk(KERN_INFO, ha, "Device temperature is"
4319 " now %d degrees C in normal range.\n",
4320 temp_val);
4322 ha->temperature = temp_state;
4323 return status;
4327 * qla4_8xxx_check_fw_alive - Check firmware health
4328 * @ha: Pointer to host adapter structure.
4330 * Context: Interrupt
4332 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
4334 uint32_t fw_heartbeat_counter;
4335 int status = QLA_SUCCESS;
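/*
 * PEG_ALIVE_COUNTER is a firmware heartbeat; if it has not advanced
 * across two consecutive one-second polls, the peg registers are dumped
 * and QLA_ERROR is returned so the caller can schedule a reset.
 */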
4337 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
4338 QLA8XXX_PEG_ALIVE_COUNTER);
4339 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
4340 if (fw_heartbeat_counter == 0xffffffff) {
4341 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
4342 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
4343 ha->host_no, __func__));
4344 return status;
4347 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
4348 ha->seconds_since_last_heartbeat++;
4349 /* FW not alive after 2 seconds */
4350 if (ha->seconds_since_last_heartbeat == 2) {
4351 ha->seconds_since_last_heartbeat = 0;
4352 qla4_8xxx_dump_peg_reg(ha);
4353 status = QLA_ERROR;
4355 } else
4356 ha->seconds_since_last_heartbeat = 0;
4358 ha->fw_heartbeat_counter = fw_heartbeat_counter;
4359 return status;
4362 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
4364 uint32_t halt_status;
4365 int halt_status_unrecoverable = 0;
4367 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
4369 if (is_qla8022(ha)) {
4370 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4371 __func__);
4372 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4373 CRB_NIU_XG_PAUSE_CTL_P0 |
4374 CRB_NIU_XG_PAUSE_CTL_P1);
4376 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
4377 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
4378 __func__);
4379 if (halt_status & HALT_STATUS_UNRECOVERABLE)
4380 halt_status_unrecoverable = 1;
4381 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4382 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
4383 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
4384 __func__);
4385 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
4386 halt_status_unrecoverable = 1;
4390 * Since we cannot change dev_state in interrupt context,
4391 * set the appropriate DPC flag and then wake up the DPC.
4393 if (halt_status_unrecoverable) {
4394 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4395 } else {
4396 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
4397 __func__);
4398 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4400 qla4xxx_mailbox_premature_completion(ha);
4401 qla4xxx_wake_dpc(ha);
4405 * qla4_8xxx_watchdog - Poll dev state
4406 * @ha: Pointer to host adapter structure.
4408 * Context: Interrupt
4410 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
4412 uint32_t dev_state;
4413 uint32_t idc_ctrl;
4415 if (is_qla8032(ha) &&
4416 (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
4417 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
4418 __func__, ha->func_num);
4420 /* don't poll if reset is going on */
4421 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4422 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4423 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
4424 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
4426 if (qla4_8xxx_check_temp(ha)) {
4427 if (is_qla8022(ha)) {
4428 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
4429 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4430 CRB_NIU_XG_PAUSE_CTL_P0 |
4431 CRB_NIU_XG_PAUSE_CTL_P1);
4433 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4434 qla4xxx_wake_dpc(ha);
4435 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
4436 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
4438 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
4439 __func__);
4441 if (is_qla8032(ha) || is_qla8042(ha)) {
4442 idc_ctrl = qla4_83xx_rd_reg(ha,
4443 QLA83XX_IDC_DRV_CTRL);
4444 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
4445 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
4446 __func__);
4447 qla4xxx_mailbox_premature_completion(
4448 ha);
4452 if ((is_qla8032(ha) || is_qla8042(ha)) ||
4453 (is_qla8022(ha) && !ql4xdontresethba)) {
4454 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4455 qla4xxx_wake_dpc(ha);
4457 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
4458 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
4459 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
4460 __func__);
4461 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
4462 qla4xxx_wake_dpc(ha);
4463 } else {
4464 /* Check firmware health */
4465 if (qla4_8xxx_check_fw_alive(ha))
4466 qla4_8xxx_process_fw_error(ha);
4471 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4473 struct iscsi_session *sess;
4474 struct ddb_entry *ddb_entry;
4475 struct scsi_qla_host *ha;
4477 sess = cls_sess->dd_data;
4478 ddb_entry = sess->dd_data;
4479 ha = ddb_entry->ha;
4481 if (!(ddb_entry->ddb_type == FLASH_DDB))
4482 return;
4484 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
4485 !iscsi_is_session_online(cls_sess)) {
4486 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
4487 INVALID_ENTRY) {
4488 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
4489 0) {
4490 atomic_set(&ddb_entry->retry_relogin_timer,
4491 INVALID_ENTRY);
4492 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4493 set_bit(DF_RELOGIN, &ddb_entry->flags);
4494 DEBUG2(ql4_printk(KERN_INFO, ha,
4495 "%s: index [%d] login device\n",
4496 __func__, ddb_entry->fw_ddb_index));
4497 } else
4498 atomic_dec(&ddb_entry->retry_relogin_timer);
4502 /* Wait for the relogin to time out */
4503 if (atomic_read(&ddb_entry->relogin_timer) &&
4504 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
4506 * If the relogin times out and the device is
4507 * still NOT ONLINE, then try to relogin again.
4509 if (!iscsi_is_session_online(cls_sess)) {
4510 /* Reset retry relogin timer */
4511 atomic_inc(&ddb_entry->relogin_retry_count);
4512 DEBUG2(ql4_printk(KERN_INFO, ha,
4513 "%s: index[%d] relogin timed out-retrying"
4514 " relogin (%d), retry (%d)\n", __func__,
4515 ddb_entry->fw_ddb_index,
4516 atomic_read(&ddb_entry->relogin_retry_count),
4517 ddb_entry->default_time2wait + 4));
4518 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4519 atomic_set(&ddb_entry->retry_relogin_timer,
4520 ddb_entry->default_time2wait + 4);
4526 * qla4xxx_timer - checks every second for work to do.
4527 * @ha: Pointer to host adapter structure.
4529 static void qla4xxx_timer(struct scsi_qla_host *ha)
4531 int start_dpc = 0;
4532 uint16_t w;
4534 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
4536 /* If we are in the middle of AER/EEH processing,
4537 * skip any processing and reschedule the timer.
4539 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4540 mod_timer(&ha->timer, jiffies + HZ);
4541 return;
4544 /* Hardware read to trigger an EEH error during mailbox waits. */
4545 if (!pci_channel_offline(ha->pdev))
4546 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
4548 if (is_qla80XX(ha))
4549 qla4_8xxx_watchdog(ha);
4551 if (is_qla40XX(ha)) {
4552 /* Check for heartbeat interval. */
4553 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
4554 ha->heartbeat_interval != 0) {
4555 ha->seconds_since_last_heartbeat++;
4556 if (ha->seconds_since_last_heartbeat >
4557 ha->heartbeat_interval + 2)
4558 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4562 /* Process any deferred work. */
4563 if (!list_empty(&ha->work_list))
4564 start_dpc++;
4566 /* Wake up the DPC routine for this adapter, if needed. */
4567 if (start_dpc ||
4568 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4569 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
4570 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
4571 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
4572 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4573 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
4574 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
4575 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4576 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4577 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
4578 test_bit(DPC_AEN, &ha->dpc_flags)) {
4579 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
4580 " - dpc flags = 0x%lx\n",
4581 ha->host_no, __func__, ha->dpc_flags));
4582 qla4xxx_wake_dpc(ha);
4585 /* Reschedule timer thread to call us back in one second */
4586 mod_timer(&ha->timer, jiffies + HZ);
4588 DEBUG2(ha->seconds_since_last_intr++);
4592 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
4593 * @ha: Pointer to host adapter structure.
4595 * This routine stalls the driver until all outstanding commands are returned.
4596 * Caller must release the Hardware Lock prior to calling this routine.
4598 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
4600 uint32_t index = 0;
4601 unsigned long flags;
4602 struct scsi_cmnd *cmd;
4603 unsigned long wtime;
4604 uint32_t wtmo;
4606 if (is_qla40XX(ha))
4607 wtmo = WAIT_CMD_TOV;
4608 else
4609 wtmo = ha->nx_reset_timeout / 2;
4611 wtime = jiffies + (wtmo * HZ);
4613 DEBUG2(ql4_printk(KERN_INFO, ha,
4614 "Wait up to %u seconds for cmds to complete\n",
4615 wtmo));
4617 while (!time_after_eq(jiffies, wtime)) {
4618 spin_lock_irqsave(&ha->hardware_lock, flags);
4619 /* Find a command that hasn't completed. */
4620 for (index = 0; index < ha->host->can_queue; index++) {
4621 cmd = scsi_host_find_tag(ha->host, index);
4623 * We cannot just check if the index is valid,
4624 * because if we are run from the SCSI EH, then
4625 * the scsi/block layer is going to prevent
4626 * the tag from being released.
4628 if (cmd != NULL && CMD_SP(cmd))
4629 break;
4631 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4633 /* If no commands are pending, the wait is complete */
4634 if (index == ha->host->can_queue)
4635 return QLA_SUCCESS;
4637 msleep(1000);
4639 /* If we timed out waiting for commands to complete,
4640 * return QLA_ERROR. */
4641 return QLA_ERROR;
4644 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
4646 uint32_t ctrl_status;
4647 unsigned long flags = 0;
4649 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
4651 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
4652 return QLA_ERROR;
4654 spin_lock_irqsave(&ha->hardware_lock, flags);
4657 * If the SCSI Reset Interrupt bit is set, clear it.
4658 * Otherwise, the Soft Reset won't work.
4660 ctrl_status = readw(&ha->reg->ctrl_status);
4661 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
4662 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4664 /* Issue Soft Reset */
4665 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
4666 readl(&ha->reg->ctrl_status);
4668 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4669 return QLA_SUCCESS;
4673 * qla4xxx_soft_reset - performs soft reset.
4674 * @ha: Pointer to host adapter structure.
4676 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
4678 uint32_t max_wait_time;
4679 unsigned long flags = 0;
4680 int status;
4681 uint32_t ctrl_status;
4683 status = qla4xxx_hw_reset(ha);
4684 if (status != QLA_SUCCESS)
4685 return status;
4687 status = QLA_ERROR;
4688 /* Wait until the Network Reset Intr bit is cleared */
4689 max_wait_time = RESET_INTR_TOV;
4690 do {
4691 spin_lock_irqsave(&ha->hardware_lock, flags);
4692 ctrl_status = readw(&ha->reg->ctrl_status);
4693 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4695 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
4696 break;
4698 msleep(1000);
4699 } while ((--max_wait_time));
4701 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
4702 DEBUG2(printk(KERN_WARNING
4703 "scsi%ld: Network Reset Intr not cleared by "
4704 "Network function, clearing it now!\n",
4705 ha->host_no));
4706 spin_lock_irqsave(&ha->hardware_lock, flags);
4707 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
4708 readl(&ha->reg->ctrl_status);
4709 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4712 /* Wait until the firmware tells us the Soft Reset is done */
4713 max_wait_time = SOFT_RESET_TOV;
4714 do {
4715 spin_lock_irqsave(&ha->hardware_lock, flags);
4716 ctrl_status = readw(&ha->reg->ctrl_status);
4717 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4719 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
4720 status = QLA_SUCCESS;
4721 break;
4724 msleep(1000);
4725 } while ((--max_wait_time));
4728 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
4729 * after the soft reset has taken place.
4731 spin_lock_irqsave(&ha->hardware_lock, flags);
4732 ctrl_status = readw(&ha->reg->ctrl_status);
4733 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
4734 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4735 readl(&ha->reg->ctrl_status);
4737 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4739 /* If the soft reset fails, then most probably the BIOS on the other
4740 * function is also enabled.
4741 * Since the initialization is sequential, the other function
4742 * won't be able to acknowledge the soft reset.
4743 * Issue a force soft reset to work around this scenario.
4745 if (max_wait_time == 0) {
4746 /* Issue Force Soft Reset */
4747 spin_lock_irqsave(&ha->hardware_lock, flags);
4748 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
4749 readl(&ha->reg->ctrl_status);
4750 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4751 /* Wait until the firmware tells us the Soft Reset is done */
4752 max_wait_time = SOFT_RESET_TOV;
4753 do {
4754 spin_lock_irqsave(&ha->hardware_lock, flags);
4755 ctrl_status = readw(&ha->reg->ctrl_status);
4756 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4758 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
4759 status = QLA_SUCCESS;
4760 break;
4763 msleep(1000);
4764 } while ((--max_wait_time));
4767 return status;
4771 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
4772 * @ha: Pointer to host adapter structure.
4773 * @res: returned scsi status
4775 * This routine is called just prior to a HARD RESET to return all
4776 * outstanding commands back to the Operating System.
4777 * The caller should make sure that the following locks are released
4778 * before calling this routine: hardware lock and io_request_lock.
4780 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
4782 struct srb *srb;
4783 int i;
4784 unsigned long flags;
4786 spin_lock_irqsave(&ha->hardware_lock, flags);
4787 for (i = 0; i < ha->host->can_queue; i++) {
4788 srb = qla4xxx_del_from_active_array(ha, i);
4789 if (srb != NULL) {
4790 srb->cmd->result = res;
4791 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
4794 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4797 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
4799 clear_bit(AF_ONLINE, &ha->flags);
4801 /* Disable the board */
4802 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
4804 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4805 qla4xxx_mark_all_devices_missing(ha);
4806 clear_bit(AF_INIT_DONE, &ha->flags);
4809 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
4811 struct iscsi_session *sess;
4812 struct ddb_entry *ddb_entry;
4814 sess = cls_session->dd_data;
4815 ddb_entry = sess->dd_data;
4816 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
4818 if (ddb_entry->ddb_type == FLASH_DDB)
4819 iscsi_block_session(ddb_entry->sess);
4820 else
4821 iscsi_session_failure(cls_session->dd_data,
4822 ISCSI_ERR_CONN_FAILED);
4826 * qla4xxx_recover_adapter - recovers adapter after a fatal error
4827 * @ha: Pointer to host adapter structure.
4829 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
4831 int status = QLA_ERROR;
4832 uint8_t reset_chip = 0;
4833 uint32_t dev_state;
4834 unsigned long wait;
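/*
 * Recovery sequence: block new I/O, fail or block every session, then
 * either stop the firmware (ISP-8xxx context reset) or perform a full
 * chip reset, and finally re-initialize the adapter, retrying via the
 * DPC if initialization fails.
 */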
4836 /* Stall incoming I/O until we are done */
4837 scsi_block_requests(ha->host);
4838 clear_bit(AF_ONLINE, &ha->flags);
4839 clear_bit(AF_LINK_UP, &ha->flags);
4841 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
4843 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4845 if ((is_qla8032(ha) || is_qla8042(ha)) &&
4846 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4847 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4848 __func__);
4849 /* disable pause frame for ISP83xx */
4850 qla4_83xx_disable_pause(ha);
4853 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4855 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
4856 reset_chip = 1;
4858 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
4859 * do not reset adapter, jump to initialize_adapter */
4860 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4861 status = QLA_SUCCESS;
4862 goto recover_ha_init_adapter;
4865 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
4866 * from eh_host_reset or ioctl module */
4867 if (is_qla80XX(ha) && !reset_chip &&
4868 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4870 DEBUG2(ql4_printk(KERN_INFO, ha,
4871 "scsi%ld: %s - Performing stop_firmware...\n",
4872 ha->host_no, __func__));
4873 status = ha->isp_ops->reset_firmware(ha);
4874 if (status == QLA_SUCCESS) {
4875 ha->isp_ops->disable_intrs(ha);
4876 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4877 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4878 } else {
4879 /* If the stop_firmware fails then
4880 * reset the entire chip */
4881 reset_chip = 1;
4882 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
4883 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4887 /* Issue full chip reset if recovering from a catastrophic error,
4888 * or if stop_firmware fails for ISP-8xxx.
4889 * This is the default case for ISP-4xxx */
4890 if (is_qla40XX(ha) || reset_chip) {
4891 if (is_qla40XX(ha))
4892 goto chip_reset;
4894 /* Check if 8XXX firmware is alive or not
4895 * We may have arrived here from NEED_RESET
4896 * detection only */
4897 if (test_bit(AF_FW_RECOVERY, &ha->flags))
4898 goto chip_reset;
4900 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
4901 while (time_before(jiffies, wait)) {
4902 if (qla4_8xxx_check_fw_alive(ha)) {
4903 qla4xxx_mailbox_premature_completion(ha);
4904 break;
4907 set_current_state(TASK_UNINTERRUPTIBLE);
4908 schedule_timeout(HZ);
4910 chip_reset:
4911 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
4912 qla4xxx_cmd_wait(ha);
4914 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4915 DEBUG2(ql4_printk(KERN_INFO, ha,
4916 "scsi%ld: %s - Performing chip reset..\n",
4917 ha->host_no, __func__));
4918 status = ha->isp_ops->reset_chip(ha);
4919 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4922 /* Flush any pending ddb changed AENs */
4923 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4925 recover_ha_init_adapter:
4926 /* Upon successful firmware/chip reset, re-initialize the adapter */
4927 if (status == QLA_SUCCESS) {
4928 /* For ISP-4xxx, force function 1 to always initialize
4929 * before function 3 to prevent both functions from
4930 * stepping on top of each other. */
4931 if (is_qla40XX(ha) && (ha->mac_index == 3))
4932 ssleep(6);
4934 /* NOTE: AF_ONLINE flag set upon successful completion of
4935 * qla4xxx_initialize_adapter */
4936 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4937 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
4938 status = qla4_8xxx_check_init_adapter_retry(ha);
4939 if (status == QLA_ERROR) {
4940 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
4941 ha->host_no, __func__);
4942 qla4xxx_dead_adapter_cleanup(ha);
4943 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4944 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4945 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4946 &ha->dpc_flags);
4947 goto exit_recover;
4952 /* Retry failed adapter initialization, if necessary
4953 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
4954 * case to prevent ping-pong resets between functions */
4955 if (!test_bit(AF_ONLINE, &ha->flags) &&
4956 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4957 /* Adapter initialization failed, see if we can retry
4958 * resetting the ha.
4959 * Since we don't want to block the DPC for too long
4960 * with multiple resets in the same thread,
4961 * utilize DPC to retry */
4962 if (is_qla80XX(ha)) {
4963 ha->isp_ops->idc_lock(ha);
4964 dev_state = qla4_8xxx_rd_direct(ha,
4965 QLA8XXX_CRB_DEV_STATE);
4966 ha->isp_ops->idc_unlock(ha);
4967 if (dev_state == QLA8XXX_DEV_FAILED) {
4968 ql4_printk(KERN_INFO, ha, "%s: don't retry "
4969 "recover adapter. H/W is in Failed "
4970 "state\n", __func__);
4971 qla4xxx_dead_adapter_cleanup(ha);
4972 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4973 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4974 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4975 &ha->dpc_flags);
4976 status = QLA_ERROR;
4978 goto exit_recover;
4982 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
4983 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
4984 DEBUG2(printk("scsi%ld: recover adapter - retrying "
4985 "(%d) more times\n", ha->host_no,
4986 ha->retry_reset_ha_cnt));
4987 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4988 status = QLA_ERROR;
4989 } else {
4990 if (ha->retry_reset_ha_cnt > 0) {
4991 /* Schedule another Reset HA--DPC will retry */
4992 ha->retry_reset_ha_cnt--;
4993 DEBUG2(printk("scsi%ld: recover adapter - "
4994 "retry remaining %d\n",
4995 ha->host_no,
4996 ha->retry_reset_ha_cnt));
4997 status = QLA_ERROR;
5000 if (ha->retry_reset_ha_cnt == 0) {
5001 /* Recover adapter retries have been exhausted.
5002 * Adapter DEAD */
5003 DEBUG2(printk("scsi%ld: recover adapter "
5004 "failed - board disabled\n",
5005 ha->host_no));
5006 qla4xxx_dead_adapter_cleanup(ha);
5007 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5008 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5009 clear_bit(DPC_RESET_HA_FW_CONTEXT,
5010 &ha->dpc_flags);
5011 status = QLA_ERROR;
5014 } else {
5015 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5016 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5017 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5020 exit_recover:
5021 ha->adapter_error_count++;
5023 if (test_bit(AF_ONLINE, &ha->flags))
5024 ha->isp_ops->enable_intrs(ha);
5026 scsi_unblock_requests(ha->host);
5028 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5029 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
5030 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
5032 return status;
5035 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
5037 struct iscsi_session *sess;
5038 struct ddb_entry *ddb_entry;
5039 struct scsi_qla_host *ha;
5041 sess = cls_session->dd_data;
5042 ddb_entry = sess->dd_data;
5043 ha = ddb_entry->ha;
5044 if (!iscsi_is_session_online(cls_session)) {
5045 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
5046 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5047 " unblock session\n", ha->host_no, __func__,
5048 ddb_entry->fw_ddb_index);
5049 iscsi_unblock_session(ddb_entry->sess);
5050 } else {
5051 /* Trigger relogin */
5052 if (ddb_entry->ddb_type == FLASH_DDB) {
5053 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
5054 test_bit(DF_DISABLE_RELOGIN,
5055 &ddb_entry->flags)))
5056 qla4xxx_arm_relogin_timer(ddb_entry);
5057 } else
5058 iscsi_session_failure(cls_session->dd_data,
5059 ISCSI_ERR_CONN_FAILED);
5064 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
5066 struct iscsi_session *sess;
5067 struct ddb_entry *ddb_entry;
5068 struct scsi_qla_host *ha;
5070 sess = cls_session->dd_data;
5071 ddb_entry = sess->dd_data;
5072 ha = ddb_entry->ha;
5073 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5074 " unblock session\n", ha->host_no, __func__,
5075 ddb_entry->fw_ddb_index);
5077 iscsi_unblock_session(ddb_entry->sess);
5079 /* Start scan target */
5080 if (test_bit(AF_ONLINE, &ha->flags)) {
5081 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5082 " start scan\n", ha->host_no, __func__,
5083 ddb_entry->fw_ddb_index);
5084 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
5086 return QLA_SUCCESS;
5089 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
5091 struct iscsi_session *sess;
5092 struct ddb_entry *ddb_entry;
5093 struct scsi_qla_host *ha;
5094 int status = QLA_SUCCESS;
5096 sess = cls_session->dd_data;
5097 ddb_entry = sess->dd_data;
5098 ha = ddb_entry->ha;
5099 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5100 " unblock user space session\n", ha->host_no, __func__,
5101 ddb_entry->fw_ddb_index);
5103 if (!iscsi_is_session_online(cls_session)) {
5104 iscsi_conn_start(ddb_entry->conn);
5105 iscsi_conn_login_event(ddb_entry->conn,
5106 ISCSI_CONN_STATE_LOGGED_IN);
5107 } else {
5108 ql4_printk(KERN_INFO, ha,
5109 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
5110 ha->host_no, __func__, ddb_entry->fw_ddb_index,
5111 cls_session->sid);
5112 status = QLA_ERROR;
5115 return status;
5118 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
5120 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
5123 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
5125 uint16_t relogin_timer;
5126 struct iscsi_session *sess;
5127 struct ddb_entry *ddb_entry;
5128 struct scsi_qla_host *ha;
5130 sess = cls_sess->dd_data;
5131 ddb_entry = sess->dd_data;
5132 ha = ddb_entry->ha;
5134 relogin_timer = max(ddb_entry->default_relogin_timeout,
5135 (uint16_t)RELOGIN_TOV);
5136 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
5138 DEBUG2(ql4_printk(KERN_INFO, ha,
5139 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
5140 ddb_entry->fw_ddb_index, relogin_timer));
5142 qla4xxx_login_flash_ddb(cls_sess);
5145 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
5147 struct iscsi_session *sess;
5148 struct ddb_entry *ddb_entry;
5149 struct scsi_qla_host *ha;
5151 sess = cls_sess->dd_data;
5152 ddb_entry = sess->dd_data;
5153 ha = ddb_entry->ha;
5155 if (!(ddb_entry->ddb_type == FLASH_DDB))
5156 return;
5158 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
5159 return;
5161 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
5162 !iscsi_is_session_online(cls_sess)) {
5163 DEBUG2(ql4_printk(KERN_INFO, ha,
5164 "relogin issued\n"));
5165 qla4xxx_relogin_flash_ddb(cls_sess);
5169 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
5171 if (ha->dpc_thread)
5172 queue_work(ha->dpc_thread, &ha->dpc_work);
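/*
 * Deferred-event plumbing: events are allocated with
 * qla4xxx_alloc_work(), appended to ha->work_list under work_lock by
 * qla4xxx_post_work(), and drained in DPC context by qla4xxx_do_work(),
 * which forwards them to the iSCSI transport.
 */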
5175 static struct qla4_work_evt *
5176 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
5177 enum qla4_work_type type)
5179 struct qla4_work_evt *e;
5180 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
5182 e = kzalloc(size, GFP_ATOMIC);
5183 if (!e)
5184 return NULL;
5186 INIT_LIST_HEAD(&e->list);
5187 e->type = type;
5188 return e;
5191 static void qla4xxx_post_work(struct scsi_qla_host *ha,
5192 struct qla4_work_evt *e)
5194 unsigned long flags;
5196 spin_lock_irqsave(&ha->work_lock, flags);
5197 list_add_tail(&e->list, &ha->work_list);
5198 spin_unlock_irqrestore(&ha->work_lock, flags);
5199 qla4xxx_wake_dpc(ha);
5202 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
5203 enum iscsi_host_event_code aen_code,
5204 uint32_t data_size, uint8_t *data)
5206 struct qla4_work_evt *e;
5208 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
5209 if (!e)
5210 return QLA_ERROR;
5212 e->u.aen.code = aen_code;
5213 e->u.aen.data_size = data_size;
5214 memcpy(e->u.aen.data, data, data_size);
5216 qla4xxx_post_work(ha, e);
5218 return QLA_SUCCESS;
5221 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
5222 uint32_t status, uint32_t pid,
5223 uint32_t data_size, uint8_t *data)
5225 struct qla4_work_evt *e;
5227 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
5228 if (!e)
5229 return QLA_ERROR;
5231 e->u.ping.status = status;
5232 e->u.ping.pid = pid;
5233 e->u.ping.data_size = data_size;
5234 memcpy(e->u.ping.data, data, data_size);
5236 qla4xxx_post_work(ha, e);
5238 return QLA_SUCCESS;
5241 static void qla4xxx_do_work(struct scsi_qla_host *ha)
5243 struct qla4_work_evt *e, *tmp;
5244 unsigned long flags;
5245 LIST_HEAD(work);
5247 spin_lock_irqsave(&ha->work_lock, flags);
5248 list_splice_init(&ha->work_list, &work);
5249 spin_unlock_irqrestore(&ha->work_lock, flags);
5251 list_for_each_entry_safe(e, tmp, &work, list) {
5252 list_del_init(&e->list);
5254 switch (e->type) {
5255 case QLA4_EVENT_AEN:
5256 iscsi_post_host_event(ha->host_no,
5257 &qla4xxx_iscsi_transport,
5258 e->u.aen.code,
5259 e->u.aen.data_size,
5260 e->u.aen.data);
5261 break;
5262 case QLA4_EVENT_PING_STATUS:
5263 iscsi_ping_comp_event(ha->host_no,
5264 &qla4xxx_iscsi_transport,
5265 e->u.ping.status,
5266 e->u.ping.pid,
5267 e->u.ping.data_size,
5268 e->u.ping.data);
5269 break;
5270 default:
5271 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
5272 "supported", e->type);
5274 kfree(e);
5279 * qla4xxx_do_dpc - dpc routine
5280 * @data: in our case pointer to adapter structure
5282 * This routine is a task that is scheduled by the interrupt handler
5283 * to perform the background processing for interrupts. We put it
5284 * on a task queue that is consumed whenever the scheduler runs; that's
5285 * so you can do anything (e.g. put the process to sleep etc). In fact,
5286 * the mid-level tries to sleep when it reaches the driver threshold
5287 * "host->can_queue". This can cause a panic if it happens in our interrupt code.
5289 static void qla4xxx_do_dpc(struct work_struct *work)
5291 struct scsi_qla_host *ha =
5292 container_of(work, struct scsi_qla_host, dpc_work);
5293 int status = QLA_ERROR;
5295 DEBUG2(ql4_printk(KERN_INFO, ha,
5296 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
5297 ha->host_no, __func__, ha->flags, ha->dpc_flags));
5299 /* Initialization not yet finished. Don't do anything yet. */
5300 if (!test_bit(AF_INIT_DONE, &ha->flags))
5301 return;
5303 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
5304 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
5305 ha->host_no, __func__, ha->flags));
5306 return;
5309 /* post events to application */
5310 qla4xxx_do_work(ha);
5312 if (is_qla80XX(ha)) {
5313 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
5314 if (is_qla8032(ha) || is_qla8042(ha)) {
5315 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
5316 __func__);
5317 /* disable pause frame for ISP83xx */
5318 qla4_83xx_disable_pause(ha);
5321 ha->isp_ops->idc_lock(ha);
5322 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5323 QLA8XXX_DEV_FAILED);
5324 ha->isp_ops->idc_unlock(ha);
5325 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
5326 qla4_8xxx_device_state_handler(ha);
5329 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
5330 if (is_qla8042(ha)) {
5331 if (ha->idc_info.info2 &
5332 ENABLE_INTERNAL_LOOPBACK) {
5333 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
5334 __func__);
5335 status = qla4_84xx_config_acb(ha,
5336 ACB_CONFIG_DISABLE);
5337 if (status != QLA_SUCCESS) {
5338 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
5339 __func__);
5343 qla4_83xx_post_idc_ack(ha);
5344 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
5347 if (is_qla8042(ha) &&
5348 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
5349 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
5350 __func__);
5351 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
5352 QLA_SUCCESS) {
5353 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
5354 __func__);
5356 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
5359 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
5360 qla4_8xxx_need_qsnt_handler(ha);
5364 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
5365 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
5366 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
5367 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
5368 if ((is_qla8022(ha) && ql4xdontresethba) ||
5369 ((is_qla8032(ha) || is_qla8042(ha)) &&
5370 qla4_83xx_idc_dontreset(ha))) {
5371 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5372 ha->host_no, __func__));
5373 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5374 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5375 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5376 goto dpc_post_reset_ha;
5378 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
5379 test_bit(DPC_RESET_HA, &ha->dpc_flags))
5380 qla4xxx_recover_adapter(ha);
5382 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
5383 uint8_t wait_time = RESET_INTR_TOV;
5385 while ((readw(&ha->reg->ctrl_status) &
5386 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
5387 if (--wait_time == 0)
5388 break;
5389 msleep(1000);
5391 if (wait_time == 0)
5392 DEBUG2(printk("scsi%ld: %s: SR|FSR "
5393 "bit not cleared-- resetting\n",
5394 ha->host_no, __func__));
5395 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5396 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
5397 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
5398 status = qla4xxx_recover_adapter(ha);
5400 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5401 if (status == QLA_SUCCESS)
5402 ha->isp_ops->enable_intrs(ha);
5406 dpc_post_reset_ha:
5407 /* ---- process AEN? --- */
5408 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
5409 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
5411 /* ---- Get DHCP IP Address? --- */
5412 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
5413 qla4xxx_get_dhcp_ip_address(ha);
5415 /* ---- relogin device? --- */
5416 if (adapter_up(ha) &&
5417 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
5418 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
5421 /* ---- link change? --- */
5422 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
5423 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
5424 if (!test_bit(AF_LINK_UP, &ha->flags)) {
5425 /* ---- link down? --- */
5426 qla4xxx_mark_all_devices_missing(ha);
5427 } else {
5428 /* ---- link up? --- *
5429 * F/W will auto login to all devices ONLY ONCE after
5430 * link up during driver initialization and runtime
5431 * fatal error recovery. Therefore, the driver must
5432 * manually relogin to devices when recovering from
5433 * connection failures, logouts, expired KATO, etc. */
5434 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
5435 qla4xxx_build_ddb_list(ha, ha->is_reset);
5436 iscsi_host_for_each_session(ha->host,
5437 qla4xxx_login_flash_ddb);
5438 } else
5439 qla4xxx_relogin_all_devices(ha);
5442 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
5443 if (qla4xxx_sysfs_ddb_export(ha))
5444 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
5445 __func__);
5450 * qla4xxx_free_adapter - release the adapter
5451 * @ha: pointer to adapter structure
5453 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
5455 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5457 /* Turn-off interrupts on the card. */
5458 ha->isp_ops->disable_intrs(ha);
5460 if (is_qla40XX(ha)) {
5461 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
5462 &ha->reg->ctrl_status);
5463 readl(&ha->reg->ctrl_status);
5464 } else if (is_qla8022(ha)) {
5465 writel(0, &ha->qla4_82xx_reg->host_int);
5466 readl(&ha->qla4_82xx_reg->host_int);
5467 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5468 writel(0, &ha->qla4_83xx_reg->risc_intr);
5469 readl(&ha->qla4_83xx_reg->risc_intr);
5472 /* Remove timer thread, if present */
5473 if (ha->timer_active)
5474 qla4xxx_stop_timer(ha);
5476 /* Kill the kernel thread for this host */
5477 if (ha->dpc_thread)
5478 destroy_workqueue(ha->dpc_thread);
5480 	/* Destroy the task work queue for this host */
5481 if (ha->task_wq)
5482 destroy_workqueue(ha->task_wq);
5484 /* Put firmware in known state */
5485 ha->isp_ops->reset_firmware(ha);
5487 if (is_qla80XX(ha)) {
5488 ha->isp_ops->idc_lock(ha);
5489 qla4_8xxx_clear_drv_active(ha);
5490 ha->isp_ops->idc_unlock(ha);
5493 /* Detach interrupts */
5494 qla4xxx_free_irqs(ha);
5496 /* free extra memory */
5497 qla4xxx_mem_free(ha);
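/*
 * qla4_8xxx_iospace_config - register mapping for the ISP82xx/83xx/84xx parts.
 * Reserves the PCI regions and ioremaps BAR 0; on ISP82xx the per-function
 * register block is located at an offset derived from pdev->devfn and the
 * doorbell write pointer is selected by function number, while ISP83xx/84xx
 * use the mapping base directly.  BAR 4 holds the doorbell region.
 */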
5500 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
5502 int status = 0;
5503 unsigned long mem_base, mem_len, db_base, db_len;
5504 struct pci_dev *pdev = ha->pdev;
5506 status = pci_request_regions(pdev, DRIVER_NAME);
5507 if (status) {
5508 printk(KERN_WARNING
5509 "scsi(%ld) Failed to reserve PIO regions (%s) "
5510 "status=%d\n", ha->host_no, pci_name(pdev), status);
5511 goto iospace_error_exit;
5514 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
5515 __func__, pdev->revision));
5516 ha->revision_id = pdev->revision;
5518 /* remap phys address */
5519 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
5520 mem_len = pci_resource_len(pdev, 0);
5521 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
5522 __func__, mem_base, mem_len));
5524 /* mapping of pcibase pointer */
5525 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
5526 if (!ha->nx_pcibase) {
5527 printk(KERN_ERR
5528 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
5529 pci_release_regions(ha->pdev);
5530 goto iospace_error_exit;
5533 /* Mapping of IO base pointer, door bell read and write pointer */
5535 /* mapping of IO base pointer */
5536 if (is_qla8022(ha)) {
5537 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
5538 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
5539 (ha->pdev->devfn << 11));
5540 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
5541 QLA82XX_CAM_RAM_DB2);
5542 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5543 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
5544 ((uint8_t *)ha->nx_pcibase);
5547 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
5548 db_len = pci_resource_len(pdev, 4);
5550 return 0;
5551 iospace_error_exit:
5552 return -ENOMEM;
5555 /**
5556 * qla4xxx_iospace_config - maps registers
5557 * @ha: pointer to adapter structure
5559  * This routine maps the HBA's registers from the PCI address space
5560 * into the kernel virtual address space for memory mapped i/o.
5562 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
5564 unsigned long pio, pio_len, pio_flags;
5565 unsigned long mmio, mmio_len, mmio_flags;
5567 pio = pci_resource_start(ha->pdev, 0);
5568 pio_len = pci_resource_len(ha->pdev, 0);
5569 pio_flags = pci_resource_flags(ha->pdev, 0);
5570 if (pio_flags & IORESOURCE_IO) {
5571 if (pio_len < MIN_IOBASE_LEN) {
5572 ql4_printk(KERN_WARNING, ha,
5573 "Invalid PCI I/O region size\n");
5574 pio = 0;
5576 } else {
5577 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
5578 pio = 0;
5581 /* Use MMIO operations for all accesses. */
5582 mmio = pci_resource_start(ha->pdev, 1);
5583 mmio_len = pci_resource_len(ha->pdev, 1);
5584 mmio_flags = pci_resource_flags(ha->pdev, 1);
5586 if (!(mmio_flags & IORESOURCE_MEM)) {
5587 ql4_printk(KERN_ERR, ha,
5588 "region #0 not an MMIO resource, aborting\n");
5590 goto iospace_error_exit;
5593 if (mmio_len < MIN_IOBASE_LEN) {
5594 ql4_printk(KERN_ERR, ha,
5595 "Invalid PCI mem region size, aborting\n");
5596 goto iospace_error_exit;
5599 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
5600 ql4_printk(KERN_WARNING, ha,
5601 "Failed to reserve PIO/MMIO regions\n");
5603 goto iospace_error_exit;
5606 ha->pio_address = pio;
5607 ha->pio_length = pio_len;
5608 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
5609 if (!ha->reg) {
5610 ql4_printk(KERN_ERR, ha,
5611 "cannot remap MMIO, aborting\n");
5613 goto iospace_error_exit;
5616 return 0;
5618 iospace_error_exit:
5619 return -ENOMEM;
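/*
 * Per-ISP operation tables.  Chip-specific behaviour is dispatched through
 * ha->isp_ops, which probe code points at one of the tables below based on
 * the device type, e.g. (illustrative sketch only, not the probe path itself):
 *
 *	if (is_qla8022(ha))
 *		ha->isp_ops = &qla4_82xx_isp_ops;
 *	else if (is_qla8032(ha) || is_qla8042(ha))
 *		ha->isp_ops = &qla4_83xx_isp_ops;
 *	else
 *		ha->isp_ops = &qla4xxx_isp_ops;
 */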
5622 static struct isp_operations qla4xxx_isp_ops = {
5623 .iospace_config = qla4xxx_iospace_config,
5624 .pci_config = qla4xxx_pci_config,
5625 .disable_intrs = qla4xxx_disable_intrs,
5626 .enable_intrs = qla4xxx_enable_intrs,
5627 .start_firmware = qla4xxx_start_firmware,
5628 .intr_handler = qla4xxx_intr_handler,
5629 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
5630 .reset_chip = qla4xxx_soft_reset,
5631 .reset_firmware = qla4xxx_hw_reset,
5632 .queue_iocb = qla4xxx_queue_iocb,
5633 .complete_iocb = qla4xxx_complete_iocb,
5634 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5635 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5636 .get_sys_info = qla4xxx_get_sys_info,
5637 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
5638 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
5641 static struct isp_operations qla4_82xx_isp_ops = {
5642 .iospace_config = qla4_8xxx_iospace_config,
5643 .pci_config = qla4_8xxx_pci_config,
5644 .disable_intrs = qla4_82xx_disable_intrs,
5645 .enable_intrs = qla4_82xx_enable_intrs,
5646 .start_firmware = qla4_8xxx_load_risc,
5647 .restart_firmware = qla4_82xx_try_start_fw,
5648 .intr_handler = qla4_82xx_intr_handler,
5649 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
5650 .need_reset = qla4_8xxx_need_reset,
5651 .reset_chip = qla4_82xx_isp_reset,
5652 .reset_firmware = qla4_8xxx_stop_firmware,
5653 .queue_iocb = qla4_82xx_queue_iocb,
5654 .complete_iocb = qla4_82xx_complete_iocb,
5655 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
5656 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
5657 .get_sys_info = qla4_8xxx_get_sys_info,
5658 .rd_reg_direct = qla4_82xx_rd_32,
5659 .wr_reg_direct = qla4_82xx_wr_32,
5660 .rd_reg_indirect = qla4_82xx_md_rd_32,
5661 .wr_reg_indirect = qla4_82xx_md_wr_32,
5662 .idc_lock = qla4_82xx_idc_lock,
5663 .idc_unlock = qla4_82xx_idc_unlock,
5664 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
5665 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
5666 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
5669 static struct isp_operations qla4_83xx_isp_ops = {
5670 .iospace_config = qla4_8xxx_iospace_config,
5671 .pci_config = qla4_8xxx_pci_config,
5672 .disable_intrs = qla4_83xx_disable_intrs,
5673 .enable_intrs = qla4_83xx_enable_intrs,
5674 .start_firmware = qla4_8xxx_load_risc,
5675 .restart_firmware = qla4_83xx_start_firmware,
5676 .intr_handler = qla4_83xx_intr_handler,
5677 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
5678 .need_reset = qla4_8xxx_need_reset,
5679 .reset_chip = qla4_83xx_isp_reset,
5680 .reset_firmware = qla4_8xxx_stop_firmware,
5681 .queue_iocb = qla4_83xx_queue_iocb,
5682 .complete_iocb = qla4_83xx_complete_iocb,
5683 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5684 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5685 .get_sys_info = qla4_8xxx_get_sys_info,
5686 .rd_reg_direct = qla4_83xx_rd_reg,
5687 .wr_reg_direct = qla4_83xx_wr_reg,
5688 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
5689 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
5690 .idc_lock = qla4_83xx_drv_lock,
5691 .idc_unlock = qla4_83xx_drv_unlock,
5692 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
5693 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
5694 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
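/*
 * Shadow register accessors: report the request-queue "out" and
 * response-queue "in" indices.  On ISP40xx these come from the DMA shadow
 * area updated by firmware; on ISP82xx they are read directly from the
 * mapped device registers.
 */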
5697 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5699 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
5702 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5704 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
5707 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5709 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
5712 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5714 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
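/*
 * iscsi_boot_sysfs callbacks.  The show/visibility helpers below back the
 * /sys/firmware/iscsi_boot#/ entries (ethernet, initiator and primary/
 * secondary target) created in qla4xxx_setup_boot_info() when exporting
 * boot targets to sysfs is enabled.
 */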
5717 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
5719 struct scsi_qla_host *ha = data;
5720 char *str = buf;
5721 int rc;
5723 switch (type) {
5724 case ISCSI_BOOT_ETH_FLAGS:
5725 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5726 break;
5727 case ISCSI_BOOT_ETH_INDEX:
5728 rc = sprintf(str, "0\n");
5729 break;
5730 case ISCSI_BOOT_ETH_MAC:
5731 rc = sysfs_format_mac(str, ha->my_mac,
5732 MAC_ADDR_LEN);
5733 break;
5734 default:
5735 rc = -ENOSYS;
5736 break;
5738 return rc;
5741 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
5743 int rc;
5745 switch (type) {
5746 case ISCSI_BOOT_ETH_FLAGS:
5747 case ISCSI_BOOT_ETH_MAC:
5748 case ISCSI_BOOT_ETH_INDEX:
5749 rc = S_IRUGO;
5750 break;
5751 default:
5752 rc = 0;
5753 break;
5755 return rc;
5758 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
5760 struct scsi_qla_host *ha = data;
5761 char *str = buf;
5762 int rc;
5764 switch (type) {
5765 case ISCSI_BOOT_INI_INITIATOR_NAME:
5766 rc = sprintf(str, "%s\n", ha->name_string);
5767 break;
5768 default:
5769 rc = -ENOSYS;
5770 break;
5772 return rc;
5775 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
5777 int rc;
5779 switch (type) {
5780 case ISCSI_BOOT_INI_INITIATOR_NAME:
5781 rc = S_IRUGO;
5782 break;
5783 default:
5784 rc = 0;
5785 break;
5787 return rc;
5790 static ssize_t
5791 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
5792 char *buf)
5794 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
5795 char *str = buf;
5796 int rc;
5798 switch (type) {
5799 case ISCSI_BOOT_TGT_NAME:
5800 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
5801 break;
5802 case ISCSI_BOOT_TGT_IP_ADDR:
5803 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
5804 rc = sprintf(buf, "%pI4\n",
5805 &boot_conn->dest_ipaddr.ip_address);
5806 else
5807 rc = sprintf(str, "%pI6\n",
5808 &boot_conn->dest_ipaddr.ip_address);
5809 break;
5810 case ISCSI_BOOT_TGT_PORT:
5811 rc = sprintf(str, "%d\n", boot_conn->dest_port);
5812 break;
5813 case ISCSI_BOOT_TGT_CHAP_NAME:
5814 rc = sprintf(str, "%.*s\n",
5815 boot_conn->chap.target_chap_name_length,
5816 (char *)&boot_conn->chap.target_chap_name);
5817 break;
5818 case ISCSI_BOOT_TGT_CHAP_SECRET:
5819 rc = sprintf(str, "%.*s\n",
5820 boot_conn->chap.target_secret_length,
5821 (char *)&boot_conn->chap.target_secret);
5822 break;
5823 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5824 rc = sprintf(str, "%.*s\n",
5825 boot_conn->chap.intr_chap_name_length,
5826 (char *)&boot_conn->chap.intr_chap_name);
5827 break;
5828 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5829 rc = sprintf(str, "%.*s\n",
5830 boot_conn->chap.intr_secret_length,
5831 (char *)&boot_conn->chap.intr_secret);
5832 break;
5833 case ISCSI_BOOT_TGT_FLAGS:
5834 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5835 break;
5836 case ISCSI_BOOT_TGT_NIC_ASSOC:
5837 rc = sprintf(str, "0\n");
5838 break;
5839 default:
5840 rc = -ENOSYS;
5841 break;
5843 return rc;
5846 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
5848 struct scsi_qla_host *ha = data;
5849 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
5851 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5854 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
5856 struct scsi_qla_host *ha = data;
5857 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
5859 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5862 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
5864 int rc;
5866 switch (type) {
5867 case ISCSI_BOOT_TGT_NAME:
5868 case ISCSI_BOOT_TGT_IP_ADDR:
5869 case ISCSI_BOOT_TGT_PORT:
5870 case ISCSI_BOOT_TGT_CHAP_NAME:
5871 case ISCSI_BOOT_TGT_CHAP_SECRET:
5872 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5873 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5874 case ISCSI_BOOT_TGT_NIC_ASSOC:
5875 case ISCSI_BOOT_TGT_FLAGS:
5876 rc = S_IRUGO;
5877 break;
5878 default:
5879 rc = 0;
5880 break;
5882 return rc;
5885 static void qla4xxx_boot_release(void *data)
5887 struct scsi_qla_host *ha = data;
5889 scsi_host_put(ha->host);
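/*
 * get_fw_boot_info - fetch the boot mode and boot target indices.
 * On ISP40xx the values are read from NVRAM (per PCI function); on ISP8xxx
 * they are read from the iSCSI parameter block in flash.  A set BIT_7 marks
 * a valid primary/secondary boot target index.
 */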
5892 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
5894 dma_addr_t buf_dma;
5895 uint32_t addr, pri_addr, sec_addr;
5896 uint32_t offset;
5897 uint16_t func_num;
5898 uint8_t val;
5899 uint8_t *buf = NULL;
5900 size_t size = 13 * sizeof(uint8_t);
5901 int ret = QLA_SUCCESS;
5903 func_num = PCI_FUNC(ha->pdev->devfn);
5905 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
5906 __func__, ha->pdev->device, func_num);
5908 if (is_qla40XX(ha)) {
5909 if (func_num == 1) {
5910 addr = NVRAM_PORT0_BOOT_MODE;
5911 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
5912 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
5913 } else if (func_num == 3) {
5914 addr = NVRAM_PORT1_BOOT_MODE;
5915 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
5916 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
5917 } else {
5918 ret = QLA_ERROR;
5919 goto exit_boot_info;
5922 /* Check Boot Mode */
5923 val = rd_nvram_byte(ha, addr);
5924 if (!(val & 0x07)) {
5925 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
5926 "options : 0x%x\n", __func__, val));
5927 ret = QLA_ERROR;
5928 goto exit_boot_info;
5931 /* get primary valid target index */
5932 val = rd_nvram_byte(ha, pri_addr);
5933 if (val & BIT_7)
5934 ddb_index[0] = (val & 0x7f);
5936 /* get secondary valid target index */
5937 val = rd_nvram_byte(ha, sec_addr);
5938 if (val & BIT_7)
5939 ddb_index[1] = (val & 0x7f);
5941 } else if (is_qla80XX(ha)) {
5942 buf = dma_alloc_coherent(&ha->pdev->dev, size,
5943 &buf_dma, GFP_KERNEL);
5944 if (!buf) {
5945 DEBUG2(ql4_printk(KERN_ERR, ha,
5946 "%s: Unable to allocate dma buffer\n",
5947 __func__));
5948 ret = QLA_ERROR;
5949 goto exit_boot_info;
5952 if (ha->port_num == 0)
5953 offset = BOOT_PARAM_OFFSET_PORT0;
5954 else if (ha->port_num == 1)
5955 offset = BOOT_PARAM_OFFSET_PORT1;
5956 else {
5957 ret = QLA_ERROR;
5958 goto exit_boot_info_free;
5960 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
5961 offset;
5962 if (qla4xxx_get_flash(ha, buf_dma, addr,
5963 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
5964 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
5965 " failed\n", ha->host_no, __func__));
5966 ret = QLA_ERROR;
5967 goto exit_boot_info_free;
5969 /* Check Boot Mode */
5970 if (!(buf[1] & 0x07)) {
5971 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
5972 " : 0x%x\n", buf[1]));
5973 ret = QLA_ERROR;
5974 goto exit_boot_info_free;
5977 /* get primary valid target index */
5978 if (buf[2] & BIT_7)
5979 ddb_index[0] = buf[2] & 0x7f;
5981 /* get secondary valid target index */
5982 if (buf[11] & BIT_7)
5983 ddb_index[1] = buf[11] & 0x7f;
5984 } else {
5985 ret = QLA_ERROR;
5986 goto exit_boot_info;
5989 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
5990 " target ID %d\n", __func__, ddb_index[0],
5991 ddb_index[1]));
5993 exit_boot_info_free:
5994 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
5995 exit_boot_info:
5996 ha->pri_ddb_idx = ddb_index[0];
5997 ha->sec_ddb_idx = ddb_index[1];
5998 return ret;
6002 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
6003 * @ha: pointer to adapter structure
6004 * @username: CHAP username to be returned
6005 * @password: CHAP password to be returned
6007 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
6008 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
6009 * So from the CHAP cache find the first BIDI CHAP entry and set it
6010 * to the boot record in sysfs.
6012 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
6013 char *password)
6015 int i, ret = -EINVAL;
6016 int max_chap_entries = 0;
6017 struct ql4_chap_table *chap_table;
6019 if (is_qla80XX(ha))
6020 max_chap_entries = (ha->hw.flt_chap_size / 2) /
6021 sizeof(struct ql4_chap_table);
6022 else
6023 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
6025 if (!ha->chap_list) {
6026 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
6027 return ret;
6030 mutex_lock(&ha->chap_sem);
6031 for (i = 0; i < max_chap_entries; i++) {
6032 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
6033 if (chap_table->cookie !=
6034 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
6035 continue;
6038 if (chap_table->flags & BIT_7) /* local */
6039 continue;
6041 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
6042 continue;
6044 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
6045 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
6046 ret = 0;
6047 break;
6049 mutex_unlock(&ha->chap_sem);
6051 return ret;
6055 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
6056 struct ql4_boot_session_info *boot_sess,
6057 uint16_t ddb_index)
6059 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
6060 struct dev_db_entry *fw_ddb_entry;
6061 dma_addr_t fw_ddb_entry_dma;
6062 uint16_t idx;
6063 uint16_t options;
6064 int ret = QLA_SUCCESS;
6066 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6067 &fw_ddb_entry_dma, GFP_KERNEL);
6068 if (!fw_ddb_entry) {
6069 DEBUG2(ql4_printk(KERN_ERR, ha,
6070 "%s: Unable to allocate dma buffer.\n",
6071 __func__));
6072 ret = QLA_ERROR;
6073 return ret;
6076 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
6077 fw_ddb_entry_dma, ddb_index)) {
6078 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
6079 "index [%d]\n", __func__, ddb_index));
6080 ret = QLA_ERROR;
6081 goto exit_boot_target;
6084 /* Update target name and IP from DDB */
6085 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
6086 min(sizeof(boot_sess->target_name),
6087 sizeof(fw_ddb_entry->iscsi_name)));
6089 options = le16_to_cpu(fw_ddb_entry->options);
6090 if (options & DDB_OPT_IPV6_DEVICE) {
6091 memcpy(&boot_conn->dest_ipaddr.ip_address,
6092 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
6093 } else {
6094 boot_conn->dest_ipaddr.ip_type = 0x1;
6095 memcpy(&boot_conn->dest_ipaddr.ip_address,
6096 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
6099 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
6101 /* update chap information */
6102 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
6104 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6106 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
6108 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
6109 target_chap_name,
6110 (char *)&boot_conn->chap.target_secret,
6111 idx);
6112 if (ret) {
6113 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
6114 ret = QLA_ERROR;
6115 goto exit_boot_target;
6118 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6119 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6122 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6124 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
6126 ret = qla4xxx_get_bidi_chap(ha,
6127 (char *)&boot_conn->chap.intr_chap_name,
6128 (char *)&boot_conn->chap.intr_secret);
6130 if (ret) {
6131 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
6132 ret = QLA_ERROR;
6133 goto exit_boot_target;
6136 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6137 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6140 exit_boot_target:
6141 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6142 fw_ddb_entry, fw_ddb_entry_dma);
6143 return ret;
6146 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
6148 uint16_t ddb_index[2];
6149 int ret = QLA_ERROR;
6150 int rval;
6152 memset(ddb_index, 0, sizeof(ddb_index));
6153 ddb_index[0] = 0xffff;
6154 ddb_index[1] = 0xffff;
6155 ret = get_fw_boot_info(ha, ddb_index);
6156 if (ret != QLA_SUCCESS) {
6157 DEBUG2(ql4_printk(KERN_INFO, ha,
6158 "%s: No boot target configured.\n", __func__));
6159 return ret;
6162 if (ql4xdisablesysfsboot)
6163 return QLA_SUCCESS;
6165 if (ddb_index[0] == 0xffff)
6166 goto sec_target;
6168 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
6169 ddb_index[0]);
6170 if (rval != QLA_SUCCESS) {
6171 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
6172 "configured\n", __func__));
6173 } else
6174 ret = QLA_SUCCESS;
6176 sec_target:
6177 if (ddb_index[1] == 0xffff)
6178 goto exit_get_boot_info;
6180 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
6181 ddb_index[1]);
6182 if (rval != QLA_SUCCESS) {
6183 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
6184 " configured\n", __func__));
6185 } else
6186 ret = QLA_SUCCESS;
6188 exit_get_boot_info:
6189 return ret;
6192 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
6194 struct iscsi_boot_kobj *boot_kobj;
6196 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
6197 return QLA_ERROR;
6199 if (ql4xdisablesysfsboot) {
6200 ql4_printk(KERN_INFO, ha,
6201 "%s: syfsboot disabled - driver will trigger login "
6202 "and publish session for discovery .\n", __func__);
6203 return QLA_SUCCESS;
6207 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
6208 if (!ha->boot_kset)
6209 goto kset_free;
6211 if (!scsi_host_get(ha->host))
6212 goto kset_free;
6213 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
6214 qla4xxx_show_boot_tgt_pri_info,
6215 qla4xxx_tgt_get_attr_visibility,
6216 qla4xxx_boot_release);
6217 if (!boot_kobj)
6218 goto put_host;
6220 if (!scsi_host_get(ha->host))
6221 goto kset_free;
6222 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
6223 qla4xxx_show_boot_tgt_sec_info,
6224 qla4xxx_tgt_get_attr_visibility,
6225 qla4xxx_boot_release);
6226 if (!boot_kobj)
6227 goto put_host;
6229 if (!scsi_host_get(ha->host))
6230 goto kset_free;
6231 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
6232 qla4xxx_show_boot_ini_info,
6233 qla4xxx_ini_get_attr_visibility,
6234 qla4xxx_boot_release);
6235 if (!boot_kobj)
6236 goto put_host;
6238 if (!scsi_host_get(ha->host))
6239 goto kset_free;
6240 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
6241 qla4xxx_show_boot_eth_info,
6242 qla4xxx_eth_get_attr_visibility,
6243 qla4xxx_boot_release);
6244 if (!boot_kobj)
6245 goto put_host;
6247 return QLA_SUCCESS;
6249 put_host:
6250 scsi_host_put(ha->host);
6251 kset_free:
6252 iscsi_boot_destroy_kset(ha->boot_kset);
6253 return -ENOMEM;
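/*
 * DDB comparison helpers: qla4xxx_get_param_ddb() and
 * qla4xxx_convert_param_ddb() reduce a session or a firmware DDB to a
 * ql4_tuple_ddb (iqn, IP address, port, tpgt, isid) so that existing
 * sessions and flash entries can be checked for duplicates.
 */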
6257 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
6258 struct ql4_tuple_ddb *tddb)
6260 struct scsi_qla_host *ha;
6261 struct iscsi_cls_session *cls_sess;
6262 struct iscsi_cls_conn *cls_conn;
6263 struct iscsi_session *sess;
6264 struct iscsi_conn *conn;
6266 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
6267 ha = ddb_entry->ha;
6268 cls_sess = ddb_entry->sess;
6269 sess = cls_sess->dd_data;
6270 cls_conn = ddb_entry->conn;
6271 conn = cls_conn->dd_data;
6273 tddb->tpgt = sess->tpgt;
6274 tddb->port = conn->persistent_port;
6275 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
6276 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
6279 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
6280 struct ql4_tuple_ddb *tddb,
6281 uint8_t *flash_isid)
6283 uint16_t options = 0;
6285 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
6286 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
6287 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
6289 options = le16_to_cpu(fw_ddb_entry->options);
6290 if (options & DDB_OPT_IPV6_DEVICE)
6291 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
6292 else
6293 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
6295 tddb->port = le16_to_cpu(fw_ddb_entry->port);
6297 if (flash_isid == NULL)
6298 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
6299 sizeof(tddb->isid));
6300 else
6301 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
6304 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
6305 struct ql4_tuple_ddb *old_tddb,
6306 struct ql4_tuple_ddb *new_tddb,
6307 uint8_t is_isid_compare)
6309 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6310 return QLA_ERROR;
6312 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
6313 return QLA_ERROR;
6315 if (old_tddb->port != new_tddb->port)
6316 return QLA_ERROR;
6318 /* For multi sessions, driver generates the ISID, so do not compare
6319 * ISID in reset path since it would be a comparison between the
6320 * driver generated ISID and firmware generated ISID. This could
6321 * lead to adding duplicated DDBs in the list as driver generated
6322 * ISID would not match firmware generated ISID.
6324 if (is_isid_compare) {
6325 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
6326 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
6327 __func__, old_tddb->isid[5], old_tddb->isid[4],
6328 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
6329 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
6330 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
6331 new_tddb->isid[0]));
6333 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6334 sizeof(old_tddb->isid)))
6335 return QLA_ERROR;
6338 DEBUG2(ql4_printk(KERN_INFO, ha,
6339 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
6340 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
6341 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
6342 new_tddb->ip_addr, new_tddb->iscsi_name));
6344 return QLA_SUCCESS;
6347 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
6348 struct dev_db_entry *fw_ddb_entry,
6349 uint32_t *index)
6351 struct ddb_entry *ddb_entry;
6352 struct ql4_tuple_ddb *fw_tddb = NULL;
6353 struct ql4_tuple_ddb *tmp_tddb = NULL;
6354 int idx;
6355 int ret = QLA_ERROR;
6357 fw_tddb = vzalloc(sizeof(*fw_tddb));
6358 if (!fw_tddb) {
6359 DEBUG2(ql4_printk(KERN_WARNING, ha,
6360 "Memory Allocation failed.\n"));
6361 ret = QLA_SUCCESS;
6362 goto exit_check;
6365 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6366 if (!tmp_tddb) {
6367 DEBUG2(ql4_printk(KERN_WARNING, ha,
6368 "Memory Allocation failed.\n"));
6369 ret = QLA_SUCCESS;
6370 goto exit_check;
6373 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6375 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6376 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6377 if (ddb_entry == NULL)
6378 continue;
6380 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
6381 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
6382 ret = QLA_SUCCESS; /* found */
6383 if (index != NULL)
6384 *index = idx;
6385 goto exit_check;
6389 exit_check:
6390 if (fw_tddb)
6391 vfree(fw_tddb);
6392 if (tmp_tddb)
6393 vfree(tmp_tddb);
6394 return ret;
6398  * qla4xxx_check_existing_isid - check if a target with the same isid exists
6399  * in the target list
6400  * @list_nt: list of targets
6401  * @isid: isid to check
6403  * This routine returns QLA_SUCCESS if a target with the same isid exists.
6405 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
6407 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6408 struct dev_db_entry *fw_ddb_entry;
6410 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6411 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
6413 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
6414 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
6415 return QLA_SUCCESS;
6418 return QLA_ERROR;
6422  * qla4xxx_update_isid - compare ddbs and update the isid
6423  * @ha: Pointer to host adapter structure.
6424  * @list_nt: list of nt targets
6425  * @fw_ddb_entry: firmware ddb entry
6427  * This routine updates the isid if ddbs have the same iqn, the same isid and
6428  * different IP addresses.
6429  * Returns QLA_SUCCESS if the isid is updated.
6431 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
6432 struct list_head *list_nt,
6433 struct dev_db_entry *fw_ddb_entry)
6435 uint8_t base_value, i;
6437 base_value = fw_ddb_entry->isid[1] & 0x1f;
6438 for (i = 0; i < 8; i++) {
6439 fw_ddb_entry->isid[1] = (base_value | (i << 5));
6440 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6441 break;
6444 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6445 return QLA_ERROR;
6447 return QLA_SUCCESS;
6451  * qla4xxx_should_update_isid - check if the isid needs to be updated
6452  * @ha: Pointer to host adapter structure.
6453  * @old_tddb: ddb tuple
6454  * @new_tddb: ddb tuple
6456  * Returns QLA_SUCCESS for different IP, different port, same iqn and
6457  * same isid.
6459 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
6460 struct ql4_tuple_ddb *old_tddb,
6461 struct ql4_tuple_ddb *new_tddb)
6463 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
6464 /* Same ip */
6465 if (old_tddb->port == new_tddb->port)
6466 return QLA_ERROR;
6469 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6470 /* different iqn */
6471 return QLA_ERROR;
6473 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6474 sizeof(old_tddb->isid)))
6475 /* different isid */
6476 return QLA_ERROR;
6478 return QLA_SUCCESS;
6482 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
6483 * @ha: Pointer to host adapter structure.
6484 * @list_nt: list of nt target.
6485 * @fw_ddb_entry: firmware ddb entry.
6487  * This routine checks if fw_ddb_entry already exists in list_nt to avoid
6488  * a duplicate ddb in list_nt.
6489  * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
6490  * Note: This function also updates the isid of the DDB if required.
6493 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
6494 struct list_head *list_nt,
6495 struct dev_db_entry *fw_ddb_entry)
6497 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6498 struct ql4_tuple_ddb *fw_tddb = NULL;
6499 struct ql4_tuple_ddb *tmp_tddb = NULL;
6500 int rval, ret = QLA_ERROR;
6502 fw_tddb = vzalloc(sizeof(*fw_tddb));
6503 if (!fw_tddb) {
6504 DEBUG2(ql4_printk(KERN_WARNING, ha,
6505 "Memory Allocation failed.\n"));
6506 ret = QLA_SUCCESS;
6507 goto exit_check;
6510 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6511 if (!tmp_tddb) {
6512 DEBUG2(ql4_printk(KERN_WARNING, ha,
6513 "Memory Allocation failed.\n"));
6514 ret = QLA_SUCCESS;
6515 goto exit_check;
6518 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6520 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6521 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
6522 nt_ddb_idx->flash_isid);
6523 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
6524 /* found duplicate ddb */
6525 if (ret == QLA_SUCCESS)
6526 goto exit_check;
6529 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6530 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
6532 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
6533 if (ret == QLA_SUCCESS) {
6534 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
6535 if (rval == QLA_SUCCESS)
6536 ret = QLA_ERROR;
6537 else
6538 ret = QLA_SUCCESS;
6540 goto exit_check;
6544 exit_check:
6545 if (fw_tddb)
6546 vfree(fw_tddb);
6547 if (tmp_tddb)
6548 vfree(tmp_tddb);
6549 return ret;
6552 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
6554 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6556 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6557 list_del_init(&ddb_idx->list);
6558 vfree(ddb_idx);
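/*
 * qla4xxx_get_ep_fwdb - build a sockaddr from the DDB's IP address and port
 * (IPv4 or IPv6, depending on DDB_OPT_IPV6_DEVICE) and open an iSCSI
 * endpoint for it, mainly so endpoint attributes can be shown in sysfs.
 */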
6562 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
6563 struct dev_db_entry *fw_ddb_entry)
6565 struct iscsi_endpoint *ep;
6566 struct sockaddr_in *addr;
6567 struct sockaddr_in6 *addr6;
6568 struct sockaddr *t_addr;
6569 struct sockaddr_storage *dst_addr;
6570 char *ip;
6572 	/* TODO: need to destroy the iscsi_endpoint on unload */
6573 dst_addr = vmalloc(sizeof(*dst_addr));
6574 if (!dst_addr)
6575 return NULL;
6577 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
6578 t_addr = (struct sockaddr *)dst_addr;
6579 t_addr->sa_family = AF_INET6;
6580 addr6 = (struct sockaddr_in6 *)dst_addr;
6581 ip = (char *)&addr6->sin6_addr;
6582 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
6583 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
6585 } else {
6586 t_addr = (struct sockaddr *)dst_addr;
6587 t_addr->sa_family = AF_INET;
6588 addr = (struct sockaddr_in *)dst_addr;
6589 ip = (char *)&addr->sin_addr;
6590 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
6591 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
6594 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
6595 vfree(dst_addr);
6596 return ep;
6599 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
6601 if (ql4xdisablesysfsboot)
6602 return QLA_SUCCESS;
6603 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
6604 return QLA_ERROR;
6605 return QLA_SUCCESS;
6608 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
6609 struct ddb_entry *ddb_entry,
6610 uint16_t idx)
6612 uint16_t def_timeout;
6614 ddb_entry->ddb_type = FLASH_DDB;
6615 ddb_entry->fw_ddb_index = INVALID_ENTRY;
6616 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
6617 ddb_entry->ha = ha;
6618 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
6619 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
6620 ddb_entry->chap_tbl_idx = INVALID_ENTRY;
6622 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6623 atomic_set(&ddb_entry->relogin_timer, 0);
6624 atomic_set(&ddb_entry->relogin_retry_count, 0);
6625 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
6626 ddb_entry->default_relogin_timeout =
6627 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
6628 def_timeout : LOGIN_TOV;
6629 ddb_entry->default_time2wait =
6630 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
6632 if (ql4xdisablesysfsboot &&
6633 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
6634 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
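/*
 * qla4xxx_wait_for_ip_configuration - poll the firmware address state of all
 * four IP interfaces for up to IP_CONFIG_TOV seconds, dropping an interface
 * from the poll set once its address state is resolved or the query fails.
 */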
6637 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
6639 uint32_t idx = 0;
6640 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
6641 uint32_t sts[MBOX_REG_COUNT];
6642 uint32_t ip_state;
6643 unsigned long wtime;
6644 int ret;
6646 wtime = jiffies + (HZ * IP_CONFIG_TOV);
6647 do {
6648 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
6649 if (ip_idx[idx] == -1)
6650 continue;
6652 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
6654 if (ret == QLA_ERROR) {
6655 ip_idx[idx] = -1;
6656 continue;
6659 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
6661 DEBUG2(ql4_printk(KERN_INFO, ha,
6662 "Waiting for IP state for idx = %d, state = 0x%x\n",
6663 ip_idx[idx], ip_state));
6664 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
6665 ip_state == IP_ADDRSTATE_INVALID ||
6666 ip_state == IP_ADDRSTATE_PREFERRED ||
6667 ip_state == IP_ADDRSTATE_DEPRICATED ||
6668 ip_state == IP_ADDRSTATE_DISABLING)
6669 ip_idx[idx] = -1;
6672 /* Break if all IP states checked */
6673 if ((ip_idx[0] == -1) &&
6674 (ip_idx[1] == -1) &&
6675 (ip_idx[2] == -1) &&
6676 (ip_idx[3] == -1))
6677 break;
6678 schedule_timeout_uninterruptible(HZ);
6679 } while (time_after(wtime, jiffies));
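/*
 * qla4xxx_cmp_fw_stentry - compare a firmware send-target entry with a flash
 * entry on IP address, ISID and port; QLA_SUCCESS means they refer to the
 * same portal.
 */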
6682 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
6683 struct dev_db_entry *flash_ddb_entry)
6685 uint16_t options = 0;
6686 size_t ip_len = IP_ADDR_LEN;
6688 options = le16_to_cpu(fw_ddb_entry->options);
6689 if (options & DDB_OPT_IPV6_DEVICE)
6690 ip_len = IPv6_ADDR_LEN;
6692 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
6693 return QLA_ERROR;
6695 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
6696 sizeof(fw_ddb_entry->isid)))
6697 return QLA_ERROR;
6699 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
6700 sizeof(fw_ddb_entry->port)))
6701 return QLA_ERROR;
6703 return QLA_SUCCESS;
6706 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
6707 struct dev_db_entry *fw_ddb_entry,
6708 uint32_t fw_idx, uint32_t *flash_index)
6710 struct dev_db_entry *flash_ddb_entry;
6711 dma_addr_t flash_ddb_entry_dma;
6712 uint32_t idx = 0;
6713 int max_ddbs;
6714 int ret = QLA_ERROR, status;
6716 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6717 MAX_DEV_DB_ENTRIES;
6719 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6720 &flash_ddb_entry_dma);
6721 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
6722 ql4_printk(KERN_ERR, ha, "Out of memory\n");
6723 goto exit_find_st_idx;
6726 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6727 flash_ddb_entry_dma, fw_idx);
6728 if (status == QLA_SUCCESS) {
6729 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6730 if (status == QLA_SUCCESS) {
6731 *flash_index = fw_idx;
6732 ret = QLA_SUCCESS;
6733 goto exit_find_st_idx;
6737 for (idx = 0; idx < max_ddbs; idx++) {
6738 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6739 flash_ddb_entry_dma, idx);
6740 if (status == QLA_ERROR)
6741 continue;
6743 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6744 if (status == QLA_SUCCESS) {
6745 *flash_index = idx;
6746 ret = QLA_SUCCESS;
6747 goto exit_find_st_idx;
6751 if (idx == max_ddbs)
6752 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
6753 fw_idx);
6755 exit_find_st_idx:
6756 if (flash_ddb_entry)
6757 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
6758 flash_ddb_entry_dma);
6760 return ret;
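/*
 * qla4xxx_build_st_list - walk the firmware DDB table and collect
 * send-target entries (those with an empty iscsi_name) into list_st,
 * recording the matching flash index for each one where it can be found.
 */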
6763 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
6764 struct list_head *list_st)
6766 struct qla_ddb_index *st_ddb_idx;
6767 int max_ddbs;
6768 int fw_idx_size;
6769 struct dev_db_entry *fw_ddb_entry;
6770 dma_addr_t fw_ddb_dma;
6771 int ret;
6772 uint32_t idx = 0, next_idx = 0;
6773 uint32_t state = 0, conn_err = 0;
6774 uint32_t flash_index = -1;
6775 uint16_t conn_id = 0;
6777 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6778 &fw_ddb_dma);
6779 if (fw_ddb_entry == NULL) {
6780 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6781 goto exit_st_list;
6784 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6785 MAX_DEV_DB_ENTRIES;
6786 fw_idx_size = sizeof(struct qla_ddb_index);
6788 for (idx = 0; idx < max_ddbs; idx = next_idx) {
6789 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
6790 NULL, &next_idx, &state,
6791 &conn_err, NULL, &conn_id);
6792 if (ret == QLA_ERROR)
6793 break;
6795 /* Ignore DDB if invalid state (unassigned) */
6796 if (state == DDB_DS_UNASSIGNED)
6797 goto continue_next_st;
6799 /* Check if ST, add to the list_st */
6800 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
6801 goto continue_next_st;
6803 st_ddb_idx = vzalloc(fw_idx_size);
6804 if (!st_ddb_idx)
6805 break;
6807 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
6808 &flash_index);
6809 if (ret == QLA_ERROR) {
6810 ql4_printk(KERN_ERR, ha,
6811 "No flash entry for ST at idx [%d]\n", idx);
6812 st_ddb_idx->flash_ddb_idx = idx;
6813 } else {
6814 ql4_printk(KERN_INFO, ha,
6815 "ST at idx [%d] is stored at flash [%d]\n",
6816 idx, flash_index);
6817 st_ddb_idx->flash_ddb_idx = flash_index;
6820 st_ddb_idx->fw_ddb_idx = idx;
6822 list_add_tail(&st_ddb_idx->list, list_st);
6823 continue_next_st:
6824 if (next_idx == 0)
6825 break;
6828 exit_st_list:
6829 if (fw_ddb_entry)
6830 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6834 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
6835 * @ha: pointer to adapter structure
6836 * @list_ddb: List from which failed ddb to be removed
6838 * Iterate over the list of DDBs and find and remove DDBs that are either in
6839 * no connection active state or failed state
6841 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
6842 struct list_head *list_ddb)
6844 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6845 uint32_t next_idx = 0;
6846 uint32_t state = 0, conn_err = 0;
6847 int ret;
6849 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6850 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
6851 NULL, 0, NULL, &next_idx, &state,
6852 &conn_err, NULL, NULL);
6853 if (ret == QLA_ERROR)
6854 continue;
6856 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6857 state == DDB_DS_SESSION_FAILED) {
6858 list_del_init(&ddb_idx->list);
6859 vfree(ddb_idx);
6864 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
6865 struct ddb_entry *ddb_entry,
6866 struct dev_db_entry *fw_ddb_entry)
6868 struct iscsi_cls_session *cls_sess;
6869 struct iscsi_session *sess;
6870 uint32_t max_ddbs = 0;
6871 uint16_t ddb_link = -1;
6873 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6874 MAX_DEV_DB_ENTRIES;
6876 cls_sess = ddb_entry->sess;
6877 sess = cls_sess->dd_data;
6879 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6880 if (ddb_link < max_ddbs)
6881 sess->discovery_parent_idx = ddb_link;
6882 else
6883 sess->discovery_parent_idx = DDB_NO_LINK;
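/*
 * qla4xxx_sess_conn_setup - create the iscsi class session and connection
 * for a flash DDB, copy the firmware parameters into them and, when called
 * for an adapter reset, flag the entry for an immediate relogin.
 */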
6886 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
6887 struct dev_db_entry *fw_ddb_entry,
6888 int is_reset, uint16_t idx)
6890 struct iscsi_cls_session *cls_sess;
6891 struct iscsi_session *sess;
6892 struct iscsi_cls_conn *cls_conn;
6893 struct iscsi_endpoint *ep;
6894 uint16_t cmds_max = 32;
6895 uint16_t conn_id = 0;
6896 uint32_t initial_cmdsn = 0;
6897 int ret = QLA_SUCCESS;
6899 struct ddb_entry *ddb_entry = NULL;
6901 	/* Create the session object with INVALID_ENTRY;
6902 	 * the target_id will get set when we issue the login
6904 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
6905 cmds_max, sizeof(struct ddb_entry),
6906 sizeof(struct ql4_task_data),
6907 initial_cmdsn, INVALID_ENTRY);
6908 if (!cls_sess) {
6909 ret = QLA_ERROR;
6910 goto exit_setup;
6914 	/* iscsi_session_setup took a reference on the transport module, so
6915 	 * call module_put to drop it and allow the driver to be unloaded. */
6917 module_put(qla4xxx_iscsi_transport.owner);
6918 sess = cls_sess->dd_data;
6919 ddb_entry = sess->dd_data;
6920 ddb_entry->sess = cls_sess;
6922 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
6923 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
6924 sizeof(struct dev_db_entry));
6926 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
6928 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
6930 if (!cls_conn) {
6931 ret = QLA_ERROR;
6932 goto exit_setup;
6935 ddb_entry->conn = cls_conn;
6937 /* Setup ep, for displaying attributes in sysfs */
6938 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
6939 if (ep) {
6940 ep->conn = cls_conn;
6941 cls_conn->ep = ep;
6942 } else {
6943 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
6944 ret = QLA_ERROR;
6945 goto exit_setup;
6948 /* Update sess/conn params */
6949 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
6950 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
6952 if (is_reset == RESET_ADAPTER) {
6953 iscsi_block_session(cls_sess);
6954 /* Use the relogin path to discover new devices
6955 		 * by short-circuiting the logic of setting
6956 * timer to relogin - instead set the flags
6957 * to initiate login right away.
6959 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
6960 set_bit(DF_RELOGIN, &ddb_entry->flags);
6963 exit_setup:
6964 return ret;
6967 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
6968 struct list_head *list_ddb,
6969 struct dev_db_entry *fw_ddb_entry)
6971 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6972 uint16_t ddb_link;
6974 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6976 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6977 if (ddb_idx->fw_ddb_idx == ddb_link) {
6978 DEBUG2(ql4_printk(KERN_INFO, ha,
6979 "Updating NT parent idx from [%d] to [%d]\n",
6980 ddb_link, ddb_idx->flash_ddb_idx));
6981 fw_ddb_entry->ddb_link =
6982 cpu_to_le16(ddb_idx->flash_ddb_idx);
6983 return;
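/*
 * qla4xxx_build_nt_list - walk the firmware DDB table and set up sessions
 * for normal-target (NT) entries, skipping boot targets and send-target
 * entries and deduplicating against DDBs already known from flash (on init)
 * or against existing sessions (on reset).
 */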
6988 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
6989 struct list_head *list_nt,
6990 struct list_head *list_st,
6991 int is_reset)
6993 struct dev_db_entry *fw_ddb_entry;
6994 struct ddb_entry *ddb_entry = NULL;
6995 dma_addr_t fw_ddb_dma;
6996 int max_ddbs;
6997 int fw_idx_size;
6998 int ret;
6999 uint32_t idx = 0, next_idx = 0;
7000 uint32_t state = 0, conn_err = 0;
7001 uint32_t ddb_idx = -1;
7002 uint16_t conn_id = 0;
7003 uint16_t ddb_link = -1;
7004 struct qla_ddb_index *nt_ddb_idx;
7006 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7007 &fw_ddb_dma);
7008 if (fw_ddb_entry == NULL) {
7009 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
7010 goto exit_nt_list;
7012 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7013 MAX_DEV_DB_ENTRIES;
7014 fw_idx_size = sizeof(struct qla_ddb_index);
7016 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7017 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7018 NULL, &next_idx, &state,
7019 &conn_err, NULL, &conn_id);
7020 if (ret == QLA_ERROR)
7021 break;
7023 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
7024 goto continue_next_nt;
7026 		/* Check if NT, then add it to the list */
7027 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
7028 goto continue_next_nt;
7030 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
7031 if (ddb_link < max_ddbs)
7032 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
7034 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
7035 state == DDB_DS_SESSION_FAILED) &&
7036 (is_reset == INIT_ADAPTER))
7037 goto continue_next_nt;
7039 DEBUG2(ql4_printk(KERN_INFO, ha,
7040 "Adding DDB to session = 0x%x\n", idx));
7042 if (is_reset == INIT_ADAPTER) {
7043 nt_ddb_idx = vmalloc(fw_idx_size);
7044 if (!nt_ddb_idx)
7045 break;
7047 nt_ddb_idx->fw_ddb_idx = idx;
7049 			/* Copy the original isid as it may get updated in
7050 			 * qla4xxx_update_isid().  We need the original isid in
7051 			 * qla4xxx_compare_tuple_ddb() to find duplicate
7052 			 * targets */
7053 memcpy(&nt_ddb_idx->flash_isid[0],
7054 &fw_ddb_entry->isid[0],
7055 sizeof(nt_ddb_idx->flash_isid));
7057 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
7058 fw_ddb_entry);
7059 if (ret == QLA_SUCCESS) {
7060 /* free nt_ddb_idx and do not add to list_nt */
7061 vfree(nt_ddb_idx);
7062 goto continue_next_nt;
7065 /* Copy updated isid */
7066 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
7067 sizeof(struct dev_db_entry));
7069 list_add_tail(&nt_ddb_idx->list, list_nt);
7070 } else if (is_reset == RESET_ADAPTER) {
7071 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
7072 &ddb_idx);
7073 if (ret == QLA_SUCCESS) {
7074 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
7075 ddb_idx);
7076 if (ddb_entry != NULL)
7077 qla4xxx_update_sess_disc_idx(ha,
7078 ddb_entry,
7079 fw_ddb_entry);
7080 goto continue_next_nt;
7084 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
7085 if (ret == QLA_ERROR)
7086 goto exit_nt_list;
7088 continue_next_nt:
7089 if (next_idx == 0)
7090 break;
7093 exit_nt_list:
7094 if (fw_ddb_entry)
7095 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
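/*
 * qla4xxx_build_new_nt_list - pick up NT entries that the firmware has
 * discovered, typically after a send-targets login (state
 * DDB_DS_NO_CONNECTION_ACTIVE), link them to the given target_id and set up
 * sessions for the ones that do not already exist.
 */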
7098 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
7099 struct list_head *list_nt,
7100 uint16_t target_id)
7102 struct dev_db_entry *fw_ddb_entry;
7103 dma_addr_t fw_ddb_dma;
7104 int max_ddbs;
7105 int fw_idx_size;
7106 int ret;
7107 uint32_t idx = 0, next_idx = 0;
7108 uint32_t state = 0, conn_err = 0;
7109 uint16_t conn_id = 0;
7110 struct qla_ddb_index *nt_ddb_idx;
7112 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7113 &fw_ddb_dma);
7114 if (fw_ddb_entry == NULL) {
7115 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
7116 goto exit_new_nt_list;
7118 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7119 MAX_DEV_DB_ENTRIES;
7120 fw_idx_size = sizeof(struct qla_ddb_index);
7122 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7123 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7124 NULL, &next_idx, &state,
7125 &conn_err, NULL, &conn_id);
7126 if (ret == QLA_ERROR)
7127 break;
7129 /* Check if NT, then add it to list */
7130 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7131 goto continue_next_new_nt;
7133 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
7134 goto continue_next_new_nt;
7136 DEBUG2(ql4_printk(KERN_INFO, ha,
7137 "Adding DDB to session = 0x%x\n", idx));
7139 nt_ddb_idx = vmalloc(fw_idx_size);
7140 if (!nt_ddb_idx)
7141 break;
7143 nt_ddb_idx->fw_ddb_idx = idx;
7145 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7146 if (ret == QLA_SUCCESS) {
7147 /* free nt_ddb_idx and do not add to list_nt */
7148 vfree(nt_ddb_idx);
7149 goto continue_next_new_nt;
7152 if (target_id < max_ddbs)
7153 fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
7155 list_add_tail(&nt_ddb_idx->list, list_nt);
7157 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7158 idx);
7159 if (ret == QLA_ERROR)
7160 goto exit_new_nt_list;
7162 continue_next_new_nt:
7163 if (next_idx == 0)
7164 break;
7167 exit_new_nt_list:
7168 if (fw_ddb_entry)
7169 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7173 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
7174 * @dev: dev associated with the sysfs entry
7175 * @data: pointer to flashnode session object
7177 * Returns:
7178 * 1: if flashnode entry is non-persistent
7179 * 0: if flashnode entry is persistent
7181 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
7183 struct iscsi_bus_flash_session *fnode_sess;
7185 if (!iscsi_flashnode_bus_match(dev, NULL))
7186 return 0;
7188 fnode_sess = iscsi_dev_to_flash_session(dev);
7190 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
7194 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
7195 * @ha: pointer to host
7196 * @fw_ddb_entry: flash ddb data
7197 * @idx: target index
7198 * @user: if set then this call is made from userland else from kernel
7200 * Returns:
7201  * On success: QLA_SUCCESS
7202  * On failure: QLA_ERROR
7204  * This creates separate sysfs entries for session and connection attributes of
7205 * the given fw ddb entry.
7206 * If this is invoked as a result of a userspace call then the entry is marked
7207 * as nonpersistent using flash_state field.
7209 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7210 struct dev_db_entry *fw_ddb_entry,
7211 uint16_t *idx, int user)
7213 struct iscsi_bus_flash_session *fnode_sess = NULL;
7214 struct iscsi_bus_flash_conn *fnode_conn = NULL;
7215 int rc = QLA_ERROR;
7217 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
7218 &qla4xxx_iscsi_transport, 0);
7219 if (!fnode_sess) {
7220 ql4_printk(KERN_ERR, ha,
7221 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
7222 __func__, *idx, ha->host_no);
7223 goto exit_tgt_create;
7226 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
7227 &qla4xxx_iscsi_transport, 0);
7228 if (!fnode_conn) {
7229 ql4_printk(KERN_ERR, ha,
7230 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
7231 __func__, *idx, ha->host_no);
7232 goto free_sess;
7235 if (user) {
7236 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
7237 } else {
7238 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7240 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
7241 fnode_sess->is_boot_target = 1;
7242 else
7243 fnode_sess->is_boot_target = 0;
7246 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7247 fw_ddb_entry);
7248 if (rc)
7249 goto free_sess;
7251 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7252 __func__, fnode_sess->dev.kobj.name);
7254 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7255 __func__, fnode_conn->dev.kobj.name);
7257 return QLA_SUCCESS;
7259 free_sess:
7260 iscsi_destroy_flashnode_sess(fnode_sess);
7262 exit_tgt_create:
7263 return QLA_ERROR;
7267 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
7268 * @shost: pointer to host
7269 * @buf: type of ddb entry (ipv4/ipv6)
7270 * @len: length of buf
7272  * This creates a new ddb entry in the flash by finding the first free index
7273  * and storing a default ddb there, and then creates a sysfs entry for the new ddb entry.
7275 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
7276 int len)
7278 struct scsi_qla_host *ha = to_qla_host(shost);
7279 struct dev_db_entry *fw_ddb_entry = NULL;
7280 dma_addr_t fw_ddb_entry_dma;
7281 struct device *dev;
7282 uint16_t idx = 0;
7283 uint16_t max_ddbs = 0;
7284 uint32_t options = 0;
7285 uint32_t rval = QLA_ERROR;
7287 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
7288 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
7289 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
7290 __func__));
7291 goto exit_ddb_add;
7294 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7295 MAX_DEV_DB_ENTRIES;
7297 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7298 &fw_ddb_entry_dma, GFP_KERNEL);
7299 if (!fw_ddb_entry) {
7300 DEBUG2(ql4_printk(KERN_ERR, ha,
7301 "%s: Unable to allocate dma buffer\n",
7302 __func__));
7303 goto exit_ddb_add;
7306 dev = iscsi_find_flashnode_sess(ha->host, NULL,
7307 qla4xxx_sysfs_ddb_is_non_persistent);
7308 if (dev) {
7309 ql4_printk(KERN_ERR, ha,
7310 "%s: A non-persistent entry %s found\n",
7311 __func__, dev->kobj.name);
7312 put_device(dev);
7313 goto exit_ddb_add;
7316 /* Index 0 and 1 are reserved for boot target entries */
7317 for (idx = 2; idx < max_ddbs; idx++) {
7318 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
7319 fw_ddb_entry_dma, idx))
7320 break;
7323 if (idx == max_ddbs)
7324 goto exit_ddb_add;
7326 if (!strncasecmp("ipv6", buf, 4))
7327 options |= IPV6_DEFAULT_DDB_ENTRY;
7329 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7330 if (rval == QLA_ERROR)
7331 goto exit_ddb_add;
7333 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
7335 exit_ddb_add:
7336 if (fw_ddb_entry)
7337 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7338 fw_ddb_entry, fw_ddb_entry_dma);
7339 if (rval == QLA_SUCCESS)
7340 return idx;
7341 else
7342 return -EIO;
7346 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
7347 * @fnode_sess: pointer to session attrs of flash ddb entry
7348 * @fnode_conn: pointer to connection attrs of flash ddb entry
7350 * This writes the contents of target ddb buffer to Flash with a valid cookie
7351 * value in order to make the ddb entry persistent.
7353 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
7354 struct iscsi_bus_flash_conn *fnode_conn)
7356 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7357 struct scsi_qla_host *ha = to_qla_host(shost);
7358 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7359 struct dev_db_entry *fw_ddb_entry = NULL;
7360 dma_addr_t fw_ddb_entry_dma;
7361 uint32_t options = 0;
7362 int rval = 0;
7364 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7365 &fw_ddb_entry_dma, GFP_KERNEL);
7366 if (!fw_ddb_entry) {
7367 DEBUG2(ql4_printk(KERN_ERR, ha,
7368 "%s: Unable to allocate dma buffer\n",
7369 __func__));
7370 rval = -ENOMEM;
7371 goto exit_ddb_apply;
7374 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7375 options |= IPV6_DEFAULT_DDB_ENTRY;
7377 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7378 if (rval == QLA_ERROR)
7379 goto exit_ddb_apply;
7381 dev_db_start_offset += (fnode_sess->target_id *
7382 sizeof(*fw_ddb_entry));
7384 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7385 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7387 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7388 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
7390 if (rval == QLA_SUCCESS) {
7391 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7392 ql4_printk(KERN_INFO, ha,
7393 "%s: flash node %u of host %lu written to flash\n",
7394 __func__, fnode_sess->target_id, ha->host_no);
7395 } else {
7396 rval = -EIO;
7397 ql4_printk(KERN_ERR, ha,
7398 "%s: Error while writing flash node %u of host %lu to flash\n",
7399 __func__, fnode_sess->target_id, ha->host_no);
7402 exit_ddb_apply:
7403 if (fw_ddb_entry)
7404 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7405 fw_ddb_entry, fw_ddb_entry_dma);
7406 return rval;
7409 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
7410 struct dev_db_entry *fw_ddb_entry,
7411 uint16_t idx)
7413 struct dev_db_entry *ddb_entry = NULL;
7414 dma_addr_t ddb_entry_dma;
7415 unsigned long wtime;
7416 uint32_t mbx_sts = 0;
7417 uint32_t state = 0, conn_err = 0;
7418 uint16_t tmo = 0;
7419 int ret = 0;
7421 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7422 &ddb_entry_dma, GFP_KERNEL);
7423 if (!ddb_entry) {
7424 DEBUG2(ql4_printk(KERN_ERR, ha,
7425 "%s: Unable to allocate dma buffer\n",
7426 __func__));
7427 return QLA_ERROR;
7430 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
7432 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
7433 if (ret != QLA_SUCCESS) {
7434 DEBUG2(ql4_printk(KERN_ERR, ha,
7435 "%s: Unable to set ddb entry for index %d\n",
7436 __func__, idx));
7437 goto exit_ddb_conn_open;
7440 qla4xxx_conn_open(ha, idx);
7442 /* To ensure that sendtargets is done, wait for at least 12 secs */
7443 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7444 (ha->def_timeout < LOGIN_TOV * 10) ?
7445 ha->def_timeout : LOGIN_TOV);
7447 DEBUG2(ql4_printk(KERN_INFO, ha,
7448 "Default time to wait for login to ddb %d\n", tmo));
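/* Poll the firmware DDB state until it reports no active connection or a
 * failed session, or until the timeout expires. */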
7450 wtime = jiffies + (HZ * tmo);
7451 do {
7452 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
7453 NULL, &state, &conn_err, NULL,
7454 NULL);
7455 if (ret == QLA_ERROR)
7456 continue;
7458 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
7459 state == DDB_DS_SESSION_FAILED)
7460 break;
7462 schedule_timeout_uninterruptible(HZ / 10);
7463 } while (time_after(wtime, jiffies));
7465 exit_ddb_conn_open:
7466 if (ddb_entry)
7467 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7468 ddb_entry, ddb_entry_dma);
7469 return ret;
7472 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
7473 struct dev_db_entry *fw_ddb_entry,
7474 uint16_t target_id)
7476 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
7477 struct list_head list_nt;
7478 uint16_t ddb_index;
7479 int ret = 0;
7481 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
7482 ql4_printk(KERN_WARNING, ha,
7483 "%s: A discovery is already in progress!\n", __func__);
7484 return QLA_ERROR;
7487 INIT_LIST_HEAD(&list_nt);
7489 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7491 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
7492 if (ret == QLA_ERROR)
7493 goto exit_login_st_clr_bit;
7495 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
7496 if (ret == QLA_ERROR)
7497 goto exit_login_st;
7499 qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
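/* Discovery is complete: clear the firmware DDB entry for each discovered
 * index and free the temporary list nodes. */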
7501 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
7502 list_del_init(&ddb_idx->list);
7503 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
7504 vfree(ddb_idx);
7507 exit_login_st:
7508 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
7509 ql4_printk(KERN_ERR, ha,
7510 "Unable to clear DDB index = 0x%x\n", ddb_index);
7513 clear_bit(ddb_index, ha->ddb_idx_map);
7515 exit_login_st_clr_bit:
7516 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7517 return ret;
7520 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
7521 struct dev_db_entry *fw_ddb_entry,
7522 uint16_t idx)
7524 int ret = QLA_ERROR;
7526 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7527 if (ret != QLA_SUCCESS)
7528 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7529 idx);
7530 else
7531 ret = -EPERM;
7533 return ret;
7537 * qla4xxx_sysfs_ddb_login - Login to the specified target
7538 * @fnode_sess: pointer to session attrs of flash ddb entry
7539 * @fnode_conn: pointer to connection attrs of flash ddb entry
7541 * This logs in to the specified target
7543 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
7544 struct iscsi_bus_flash_conn *fnode_conn)
7546 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7547 struct scsi_qla_host *ha = to_qla_host(shost);
7548 struct dev_db_entry *fw_ddb_entry = NULL;
7549 dma_addr_t fw_ddb_entry_dma;
7550 uint32_t options = 0;
7551 int ret = 0;
7553 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
7554 ql4_printk(KERN_ERR, ha,
7555 "%s: Target info is not persistent\n", __func__);
7556 ret = -EIO;
7557 goto exit_ddb_login;
7560 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7561 &fw_ddb_entry_dma, GFP_KERNEL);
7562 if (!fw_ddb_entry) {
7563 DEBUG2(ql4_printk(KERN_ERR, ha,
7564 "%s: Unable to allocate dma buffer\n",
7565 __func__));
7566 ret = -ENOMEM;
7567 goto exit_ddb_login;
7570 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7571 options |= IPV6_DEFAULT_DDB_ENTRY;
7573 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7574 if (ret == QLA_ERROR)
7575 goto exit_ddb_login;
7577 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7578 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7580 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7581 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
7582 fnode_sess->target_id);
7583 else
7584 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
7585 fnode_sess->target_id);
7587 if (ret > 0)
7588 ret = -EIO;
7590 exit_ddb_login:
7591 if (fw_ddb_entry)
7592 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7593 fw_ddb_entry, fw_ddb_entry_dma);
7594 return ret;
7598 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
7599 * @cls_sess: pointer to session to be logged out
7601 * This performs session log out from the specified target
7603 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
7605 struct iscsi_session *sess;
7606 struct ddb_entry *ddb_entry = NULL;
7607 struct scsi_qla_host *ha;
7608 struct dev_db_entry *fw_ddb_entry = NULL;
7609 dma_addr_t fw_ddb_entry_dma;
7610 unsigned long flags;
7611 unsigned long wtime;
7612 uint32_t ddb_state;
7613 int options;
7614 int ret = 0;
7616 sess = cls_sess->dd_data;
7617 ddb_entry = sess->dd_data;
7618 ha = ddb_entry->ha;
7620 if (ddb_entry->ddb_type != FLASH_DDB) {
7621 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
7622 __func__);
7623 ret = -ENXIO;
7624 goto exit_ddb_logout;
7627 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7628 ql4_printk(KERN_ERR, ha,
7629 "%s: Logout from boot target entry is not permitted.\n",
7630 __func__);
7631 ret = -EPERM;
7632 goto exit_ddb_logout;
7635 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7636 &fw_ddb_entry_dma, GFP_KERNEL);
7637 if (!fw_ddb_entry) {
7638 ql4_printk(KERN_ERR, ha,
7639 "%s: Unable to allocate dma buffer\n", __func__);
7640 ret = -ENOMEM;
7641 goto exit_ddb_logout;
7644 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
7645 goto ddb_logout_init;
7647 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7648 fw_ddb_entry, fw_ddb_entry_dma,
7649 NULL, NULL, &ddb_state, NULL,
7650 NULL, NULL);
7651 if (ret == QLA_ERROR)
7652 goto ddb_logout_init;
7654 if (ddb_state == DDB_DS_SESSION_ACTIVE)
7655 goto ddb_logout_init;
7657 /* Wait until the next relogin is triggered using DF_RELOGIN and
7658 * clear DF_RELOGIN to avoid triggering further relogins. */
7660 wtime = jiffies + (HZ * RELOGIN_TOV);
7661 do {
7662 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
7663 goto ddb_logout_init;
7665 schedule_timeout_uninterruptible(HZ);
7666 } while ((time_after(wtime, jiffies)));
7668 ddb_logout_init:
7669 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
7670 atomic_set(&ddb_entry->relogin_timer, 0);
7672 options = LOGOUT_OPTION_CLOSE_SESSION;
7673 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
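/* Poll the firmware DDB state until the logout completes (no active
 * connection or failed session) or LOGOUT_TOV expires. */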
7675 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
7676 wtime = jiffies + (HZ * LOGOUT_TOV);
7677 do {
7678 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7679 fw_ddb_entry, fw_ddb_entry_dma,
7680 NULL, NULL, &ddb_state, NULL,
7681 NULL, NULL);
7682 if (ret == QLA_ERROR)
7683 goto ddb_logout_clr_sess;
7685 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
7686 (ddb_state == DDB_DS_SESSION_FAILED))
7687 goto ddb_logout_clr_sess;
7689 schedule_timeout_uninterruptible(HZ);
7690 } while ((time_after(wtime, jiffies)));
7692 ddb_logout_clr_sess:
7693 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7695 /* We decremented the driver's module reference count when we set up
7696 * the session so that driver unload could proceed seamlessly without
7697 * actually destroying the session; take that reference back before
7698 * destroying the endpoint. */
7700 try_module_get(qla4xxx_iscsi_transport.owner);
7701 iscsi_destroy_endpoint(ddb_entry->conn->ep);
7703 spin_lock_irqsave(&ha->hardware_lock, flags);
7704 qla4xxx_free_ddb(ha, ddb_entry);
7705 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
7706 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7708 iscsi_session_teardown(ddb_entry->sess);
7710 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
7711 ret = QLA_SUCCESS;
7713 exit_ddb_logout:
7714 if (fw_ddb_entry)
7715 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7716 fw_ddb_entry, fw_ddb_entry_dma);
7717 return ret;
7721 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
7722 * @fnode_sess: pointer to session attrs of flash ddb entry
7723 * @fnode_conn: pointer to connection attrs of flash ddb entry
7725 * This performs log out from the specified target
7727 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
7728 struct iscsi_bus_flash_conn *fnode_conn)
7730 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7731 struct scsi_qla_host *ha = to_qla_host(shost);
7732 struct ql4_tuple_ddb *flash_tddb = NULL;
7733 struct ql4_tuple_ddb *tmp_tddb = NULL;
7734 struct dev_db_entry *fw_ddb_entry = NULL;
7735 struct ddb_entry *ddb_entry = NULL;
7736 dma_addr_t fw_ddb_dma;
7737 uint32_t next_idx = 0;
7738 uint32_t state = 0, conn_err = 0;
7739 uint16_t conn_id = 0;
7740 int idx, index;
7741 int status, ret = 0;
7743 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7744 &fw_ddb_dma);
7745 if (fw_ddb_entry == NULL) {
7746 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
7747 ret = -ENOMEM;
7748 goto exit_ddb_logout;
7751 flash_tddb = vzalloc(sizeof(*flash_tddb));
7752 if (!flash_tddb) {
7753 ql4_printk(KERN_WARNING, ha,
7754 "%s:Memory Allocation failed.\n", __func__);
7755 ret = -ENOMEM;
7756 goto exit_ddb_logout;
7759 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
7760 if (!tmp_tddb) {
7761 ql4_printk(KERN_WARNING, ha,
7762 "%s:Memory Allocation failed.\n", __func__);
7763 ret = -ENOMEM;
7764 goto exit_ddb_logout;
7767 if (!fnode_sess->targetname) {
7768 ql4_printk(KERN_ERR, ha,
7769 "%s:Cannot logout from SendTarget entry\n",
7770 __func__);
7771 ret = -EPERM;
7772 goto exit_ddb_logout;
7775 if (fnode_sess->is_boot_target) {
7776 ql4_printk(KERN_ERR, ha,
7777 "%s: Logout from boot target entry is not permitted.\n",
7778 __func__);
7779 ret = -EPERM;
7780 goto exit_ddb_logout;
7783 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
7784 ISCSI_NAME_SIZE);
7786 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7787 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
7788 else
7789 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
7791 flash_tddb->tpgt = fnode_sess->tpgt;
7792 flash_tddb->port = fnode_conn->port;
7794 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
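/* Walk the active firmware DDB entries and log out the flash DDB session
 * whose tuple (name, address, port, tpgt, ISID) matches this flash node. */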
7796 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7797 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7798 if (ddb_entry == NULL)
7799 continue;
7801 if (ddb_entry->ddb_type != FLASH_DDB)
7802 continue;
7804 index = ddb_entry->sess->target_id;
7805 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
7806 fw_ddb_dma, NULL, &next_idx,
7807 &state, &conn_err, NULL,
7808 &conn_id);
7809 if (status == QLA_ERROR) {
7810 ret = -ENOMEM;
7811 break;
7814 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
7816 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
7817 true);
7818 if (status == QLA_SUCCESS) {
7819 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
7820 break;
7824 if (idx == MAX_DDB_ENTRIES)
7825 ret = -ESRCH;
7827 exit_ddb_logout:
7828 if (flash_tddb)
7829 vfree(flash_tddb);
7830 if (tmp_tddb)
7831 vfree(tmp_tddb);
7832 if (fw_ddb_entry)
7833 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7835 return ret;
7838 static int
7839 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
7840 int param, char *buf)
7842 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7843 struct scsi_qla_host *ha = to_qla_host(shost);
7844 struct iscsi_bus_flash_conn *fnode_conn;
7845 struct ql4_chap_table chap_tbl;
7846 struct device *dev;
7847 int parent_type;
7848 int rc = 0;
7850 dev = iscsi_find_flashnode_conn(fnode_sess);
7851 if (!dev)
7852 return -EIO;
7854 fnode_conn = iscsi_dev_to_flash_conn(dev);
7856 switch (param) {
7857 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
7858 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
7859 break;
7860 case ISCSI_FLASHNODE_PORTAL_TYPE:
7861 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
7862 break;
7863 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
7864 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
7865 break;
7866 case ISCSI_FLASHNODE_DISCOVERY_SESS:
7867 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
7868 break;
7869 case ISCSI_FLASHNODE_ENTRY_EN:
7870 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
7871 break;
7872 case ISCSI_FLASHNODE_HDR_DGST_EN:
7873 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
7874 break;
7875 case ISCSI_FLASHNODE_DATA_DGST_EN:
7876 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
7877 break;
7878 case ISCSI_FLASHNODE_IMM_DATA_EN:
7879 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
7880 break;
7881 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
7882 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
7883 break;
7884 case ISCSI_FLASHNODE_DATASEQ_INORDER:
7885 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
7886 break;
7887 case ISCSI_FLASHNODE_PDU_INORDER:
7888 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
7889 break;
7890 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
7891 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
7892 break;
7893 case ISCSI_FLASHNODE_SNACK_REQ_EN:
7894 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
7895 break;
7896 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
7897 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
7898 break;
7899 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
7900 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
7901 break;
7902 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
7903 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
7904 break;
7905 case ISCSI_FLASHNODE_ERL:
7906 rc = sprintf(buf, "%u\n", fnode_sess->erl);
7907 break;
7908 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
7909 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
7910 break;
7911 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
7912 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
7913 break;
7914 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
7915 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
7916 break;
7917 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
7918 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
7919 break;
7920 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
7921 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
7922 break;
7923 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
7924 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
7925 break;
7926 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
7927 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
7928 break;
7929 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
7930 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
7931 break;
7932 case ISCSI_FLASHNODE_FIRST_BURST:
7933 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
7934 break;
7935 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7936 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
7937 break;
7938 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7939 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
7940 break;
7941 case ISCSI_FLASHNODE_MAX_R2T:
7942 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
7943 break;
7944 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7945 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
7946 break;
7947 case ISCSI_FLASHNODE_ISID:
7948 rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
7949 fnode_sess->isid[0], fnode_sess->isid[1],
7950 fnode_sess->isid[2], fnode_sess->isid[3],
7951 fnode_sess->isid[4], fnode_sess->isid[5]);
7952 break;
7953 case ISCSI_FLASHNODE_TSID:
7954 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
7955 break;
7956 case ISCSI_FLASHNODE_PORT:
7957 rc = sprintf(buf, "%d\n", fnode_conn->port);
7958 break;
7959 case ISCSI_FLASHNODE_MAX_BURST:
7960 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
7961 break;
7962 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7963 rc = sprintf(buf, "%u\n",
7964 fnode_sess->default_taskmgmt_timeout);
7965 break;
7966 case ISCSI_FLASHNODE_IPADDR:
7967 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7968 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
7969 else
7970 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
7971 break;
7972 case ISCSI_FLASHNODE_ALIAS:
7973 if (fnode_sess->targetalias)
7974 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
7975 else
7976 rc = sprintf(buf, "\n");
7977 break;
7978 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7979 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7980 rc = sprintf(buf, "%pI6\n",
7981 fnode_conn->redirect_ipaddr);
7982 else
7983 rc = sprintf(buf, "%pI4\n",
7984 fnode_conn->redirect_ipaddr);
7985 break;
7986 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7987 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
7988 break;
7989 case ISCSI_FLASHNODE_LOCAL_PORT:
7990 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
7991 break;
7992 case ISCSI_FLASHNODE_IPV4_TOS:
7993 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
7994 break;
7995 case ISCSI_FLASHNODE_IPV6_TC:
7996 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7997 rc = sprintf(buf, "%u\n",
7998 fnode_conn->ipv6_traffic_class);
7999 else
8000 rc = sprintf(buf, "\n");
8001 break;
8002 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
8003 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
8004 break;
8005 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
8006 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
8007 rc = sprintf(buf, "%pI6\n",
8008 fnode_conn->link_local_ipv6_addr);
8009 else
8010 rc = sprintf(buf, "\n");
8011 break;
8012 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
8013 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
8014 break;
8015 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
8016 if (fnode_sess->discovery_parent_type == DDB_ISNS)
8017 parent_type = ISCSI_DISC_PARENT_ISNS;
8018 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
8019 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8020 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
8021 parent_type = ISCSI_DISC_PARENT_SENDTGT;
8022 else
8023 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8025 rc = sprintf(buf, "%s\n",
8026 iscsi_get_discovery_parent_name(parent_type));
8027 break;
8028 case ISCSI_FLASHNODE_NAME:
8029 if (fnode_sess->targetname)
8030 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
8031 else
8032 rc = sprintf(buf, "\n");
8033 break;
8034 case ISCSI_FLASHNODE_TPGT:
8035 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
8036 break;
8037 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8038 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
8039 break;
8040 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8041 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
8042 break;
8043 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8044 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
8045 break;
8046 case ISCSI_FLASHNODE_USERNAME:
8047 if (fnode_sess->chap_auth_en) {
8048 qla4xxx_get_uni_chap_at_index(ha,
8049 chap_tbl.name,
8050 chap_tbl.secret,
8051 fnode_sess->chap_out_idx);
8052 rc = sprintf(buf, "%s\n", chap_tbl.name);
8053 } else {
8054 rc = sprintf(buf, "\n");
8056 break;
8057 case ISCSI_FLASHNODE_PASSWORD:
8058 if (fnode_sess->chap_auth_en) {
8059 qla4xxx_get_uni_chap_at_index(ha,
8060 chap_tbl.name,
8061 chap_tbl.secret,
8062 fnode_sess->chap_out_idx);
8063 rc = sprintf(buf, "%s\n", chap_tbl.secret);
8064 } else {
8065 rc = sprintf(buf, "\n");
8067 break;
8068 case ISCSI_FLASHNODE_STATSN:
8069 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
8070 break;
8071 case ISCSI_FLASHNODE_EXP_STATSN:
8072 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
8073 break;
8074 case ISCSI_FLASHNODE_IS_BOOT_TGT:
8075 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
8076 break;
8077 default:
8078 rc = -ENOSYS;
8079 break;
8082 put_device(dev);
8083 return rc;
8087 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
8088 * @fnode_sess: pointer to session attrs of flash ddb entry
8089 * @fnode_conn: pointer to connection attrs of flash ddb entry
8090 * @data: Parameters and their values to update
8091 * @len: len of data
8093 * This sets the parameters of the flash ddb entry and writes them to flash.
8095 static int
8096 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
8097 struct iscsi_bus_flash_conn *fnode_conn,
8098 void *data, int len)
8100 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8101 struct scsi_qla_host *ha = to_qla_host(shost);
8102 struct iscsi_flashnode_param_info *fnode_param;
8103 struct ql4_chap_table chap_tbl;
8104 struct nlattr *attr;
8105 uint16_t chap_out_idx = INVALID_ENTRY;
8106 int rc = QLA_ERROR;
8107 uint32_t rem = len;
8109 memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
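/* Walk the netlink attributes and copy each flash node parameter into the
 * session/connection attrs; the updated entry is written to flash below. */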
8110 nla_for_each_attr(attr, data, len, rem) {
8111 fnode_param = nla_data(attr);
8113 switch (fnode_param->param) {
8114 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
8115 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
8116 break;
8117 case ISCSI_FLASHNODE_PORTAL_TYPE:
8118 memcpy(fnode_sess->portal_type, fnode_param->value,
8119 strlen(fnode_sess->portal_type));
8120 break;
8121 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
8122 fnode_sess->auto_snd_tgt_disable =
8123 fnode_param->value[0];
8124 break;
8125 case ISCSI_FLASHNODE_DISCOVERY_SESS:
8126 fnode_sess->discovery_sess = fnode_param->value[0];
8127 break;
8128 case ISCSI_FLASHNODE_ENTRY_EN:
8129 fnode_sess->entry_state = fnode_param->value[0];
8130 break;
8131 case ISCSI_FLASHNODE_HDR_DGST_EN:
8132 fnode_conn->hdrdgst_en = fnode_param->value[0];
8133 break;
8134 case ISCSI_FLASHNODE_DATA_DGST_EN:
8135 fnode_conn->datadgst_en = fnode_param->value[0];
8136 break;
8137 case ISCSI_FLASHNODE_IMM_DATA_EN:
8138 fnode_sess->imm_data_en = fnode_param->value[0];
8139 break;
8140 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
8141 fnode_sess->initial_r2t_en = fnode_param->value[0];
8142 break;
8143 case ISCSI_FLASHNODE_DATASEQ_INORDER:
8144 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
8145 break;
8146 case ISCSI_FLASHNODE_PDU_INORDER:
8147 fnode_sess->pdu_inorder_en = fnode_param->value[0];
8148 break;
8149 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
8150 fnode_sess->chap_auth_en = fnode_param->value[0];
8151 /* Invalidate chap index if chap auth is disabled */
8152 if (!fnode_sess->chap_auth_en)
8153 fnode_sess->chap_out_idx = INVALID_ENTRY;
8155 break;
8156 case ISCSI_FLASHNODE_SNACK_REQ_EN:
8157 fnode_conn->snack_req_en = fnode_param->value[0];
8158 break;
8159 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
8160 fnode_sess->discovery_logout_en = fnode_param->value[0];
8161 break;
8162 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
8163 fnode_sess->bidi_chap_en = fnode_param->value[0];
8164 break;
8165 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
8166 fnode_sess->discovery_auth_optional =
8167 fnode_param->value[0];
8168 break;
8169 case ISCSI_FLASHNODE_ERL:
8170 fnode_sess->erl = fnode_param->value[0];
8171 break;
8172 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
8173 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
8174 break;
8175 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
8176 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
8177 break;
8178 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
8179 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
8180 break;
8181 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
8182 fnode_conn->tcp_timer_scale = fnode_param->value[0];
8183 break;
8184 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
8185 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
8186 break;
8187 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
8188 fnode_conn->fragment_disable = fnode_param->value[0];
8189 break;
8190 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
8191 fnode_conn->max_recv_dlength =
8192 *(unsigned *)fnode_param->value;
8193 break;
8194 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
8195 fnode_conn->max_xmit_dlength =
8196 *(unsigned *)fnode_param->value;
8197 break;
8198 case ISCSI_FLASHNODE_FIRST_BURST:
8199 fnode_sess->first_burst =
8200 *(unsigned *)fnode_param->value;
8201 break;
8202 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
8203 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
8204 break;
8205 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
8206 fnode_sess->time2retain =
8207 *(uint16_t *)fnode_param->value;
8208 break;
8209 case ISCSI_FLASHNODE_MAX_R2T:
8210 fnode_sess->max_r2t =
8211 *(uint16_t *)fnode_param->value;
8212 break;
8213 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
8214 fnode_conn->keepalive_timeout =
8215 *(uint16_t *)fnode_param->value;
8216 break;
8217 case ISCSI_FLASHNODE_ISID:
8218 memcpy(fnode_sess->isid, fnode_param->value,
8219 sizeof(fnode_sess->isid));
8220 break;
8221 case ISCSI_FLASHNODE_TSID:
8222 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
8223 break;
8224 case ISCSI_FLASHNODE_PORT:
8225 fnode_conn->port = *(uint16_t *)fnode_param->value;
8226 break;
8227 case ISCSI_FLASHNODE_MAX_BURST:
8228 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
8229 break;
8230 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
8231 fnode_sess->default_taskmgmt_timeout =
8232 *(uint16_t *)fnode_param->value;
8233 break;
8234 case ISCSI_FLASHNODE_IPADDR:
8235 memcpy(fnode_conn->ipaddress, fnode_param->value,
8236 IPv6_ADDR_LEN);
8237 break;
8238 case ISCSI_FLASHNODE_ALIAS:
8239 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
8240 (char *)fnode_param->value);
8241 break;
8242 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
8243 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
8244 IPv6_ADDR_LEN);
8245 break;
8246 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
8247 fnode_conn->max_segment_size =
8248 *(unsigned *)fnode_param->value;
8249 break;
8250 case ISCSI_FLASHNODE_LOCAL_PORT:
8251 fnode_conn->local_port =
8252 *(uint16_t *)fnode_param->value;
8253 break;
8254 case ISCSI_FLASHNODE_IPV4_TOS:
8255 fnode_conn->ipv4_tos = fnode_param->value[0];
8256 break;
8257 case ISCSI_FLASHNODE_IPV6_TC:
8258 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
8259 break;
8260 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
8261 fnode_conn->ipv6_flow_label = fnode_param->value[0];
8262 break;
8263 case ISCSI_FLASHNODE_NAME:
8264 rc = iscsi_switch_str_param(&fnode_sess->targetname,
8265 (char *)fnode_param->value);
8266 break;
8267 case ISCSI_FLASHNODE_TPGT:
8268 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
8269 break;
8270 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
8271 memcpy(fnode_conn->link_local_ipv6_addr,
8272 fnode_param->value, IPv6_ADDR_LEN);
8273 break;
8274 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
8275 fnode_sess->discovery_parent_idx =
8276 *(uint16_t *)fnode_param->value;
8277 break;
8278 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8279 fnode_conn->tcp_xmit_wsf =
8280 *(uint8_t *)fnode_param->value;
8281 break;
8282 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8283 fnode_conn->tcp_recv_wsf =
8284 *(uint8_t *)fnode_param->value;
8285 break;
8286 case ISCSI_FLASHNODE_STATSN:
8287 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
8288 break;
8289 case ISCSI_FLASHNODE_EXP_STATSN:
8290 fnode_conn->exp_statsn =
8291 *(uint32_t *)fnode_param->value;
8292 break;
8293 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8294 chap_out_idx = *(uint16_t *)fnode_param->value;
8295 if (!qla4xxx_get_uni_chap_at_index(ha,
8296 chap_tbl.name,
8297 chap_tbl.secret,
8298 chap_out_idx)) {
8299 fnode_sess->chap_out_idx = chap_out_idx;
8300 /* Enable chap auth if chap index is valid */
8301 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
8303 break;
8304 default:
8305 ql4_printk(KERN_ERR, ha,
8306 "%s: No such sysfs attribute\n", __func__);
8307 rc = -ENOSYS;
8308 goto exit_set_param;
8312 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
8314 exit_set_param:
8315 return rc;
8319 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
8320 * @fnode_sess: pointer to session attrs of flash ddb entry
8322 * This invalidates the flash ddb entry at the given index
8324 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
8326 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8327 struct scsi_qla_host *ha = to_qla_host(shost);
8328 uint32_t dev_db_start_offset;
8329 uint32_t dev_db_end_offset;
8330 struct dev_db_entry *fw_ddb_entry = NULL;
8331 dma_addr_t fw_ddb_entry_dma;
8332 uint16_t *ddb_cookie = NULL;
8333 size_t ddb_size = 0;
8334 void *pddb = NULL;
8335 int target_id;
8336 int rc = 0;
8338 if (fnode_sess->is_boot_target) {
8339 rc = -EPERM;
8340 DEBUG2(ql4_printk(KERN_ERR, ha,
8341 "%s: Deletion of boot target entry is not permitted.\n",
8342 __func__));
8343 goto exit_ddb_del;
8346 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
8347 goto sysfs_ddb_del;
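/* Compute the flash offset of this DDB entry: ISP4xxx rewrites the whole
 * entry, while ISP8xxx only invalidates the two-byte cookie within it. */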
8349 if (is_qla40XX(ha)) {
8350 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
8351 dev_db_end_offset = FLASH_OFFSET_DB_END;
8352 dev_db_start_offset += (fnode_sess->target_id *
8353 sizeof(*fw_ddb_entry));
8354 ddb_size = sizeof(*fw_ddb_entry);
8355 } else {
8356 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
8357 (ha->hw.flt_region_ddb << 2);
8358 /* flt_ddb_size is the DDB table size for both ports,
8359 * so divide it by 2 to calculate the offset for the second port. */
8361 if (ha->port_num == 1)
8362 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
8364 dev_db_end_offset = dev_db_start_offset +
8365 (ha->hw.flt_ddb_size / 2);
8367 dev_db_start_offset += (fnode_sess->target_id *
8368 sizeof(*fw_ddb_entry));
8369 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
8371 ddb_size = sizeof(*ddb_cookie);
8374 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
8375 __func__, dev_db_start_offset, dev_db_end_offset));
8377 if (dev_db_start_offset > dev_db_end_offset) {
8378 rc = -EIO;
8379 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
8380 __func__, fnode_sess->target_id));
8381 goto exit_ddb_del;
8384 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
8385 &fw_ddb_entry_dma, GFP_KERNEL);
8386 if (!pddb) {
8387 rc = -ENOMEM;
8388 DEBUG2(ql4_printk(KERN_ERR, ha,
8389 "%s: Unable to allocate dma buffer\n",
8390 __func__));
8391 goto exit_ddb_del;
8394 if (is_qla40XX(ha)) {
8395 fw_ddb_entry = pddb;
8396 memset(fw_ddb_entry, 0, ddb_size);
8397 ddb_cookie = &fw_ddb_entry->cookie;
8398 } else {
8399 ddb_cookie = pddb;
8402 /* invalidate the cookie */
8403 *ddb_cookie = 0xFFEE;
8404 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
8405 ddb_size, FLASH_OPT_RMW_COMMIT);
8407 sysfs_ddb_del:
8408 target_id = fnode_sess->target_id;
8409 iscsi_destroy_flashnode_sess(fnode_sess);
8410 ql4_printk(KERN_INFO, ha,
8411 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
8412 __func__, target_id, ha->host_no);
8413 exit_ddb_del:
8414 if (pddb)
8415 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
8416 fw_ddb_entry_dma);
8417 return rc;
8421 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
8422 * @ha: pointer to adapter structure
8424 * Export the firmware DDB for all send targets and normal targets to sysfs.
8426 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
8428 struct dev_db_entry *fw_ddb_entry = NULL;
8429 dma_addr_t fw_ddb_entry_dma;
8430 uint16_t max_ddbs;
8431 uint16_t idx = 0;
8432 int ret = QLA_SUCCESS;
8434 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
8435 sizeof(*fw_ddb_entry),
8436 &fw_ddb_entry_dma, GFP_KERNEL);
8437 if (!fw_ddb_entry) {
8438 DEBUG2(ql4_printk(KERN_ERR, ha,
8439 "%s: Unable to allocate dma buffer\n",
8440 __func__));
8441 return -ENOMEM;
8444 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
8445 MAX_DEV_DB_ENTRIES;
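/* Create a sysfs flash node for every valid DDB found in flash; indexes
 * with no valid entry are skipped. */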
8447 for (idx = 0; idx < max_ddbs; idx++) {
8448 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
8449 idx))
8450 continue;
8452 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
8453 if (ret) {
8454 ret = -EIO;
8455 break;
8459 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
8460 fw_ddb_entry_dma);
8462 return ret;
8465 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
8467 iscsi_destroy_all_flashnode(ha->host);
8471 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
8472 * @ha: pointer to adapter structure
8473 * @is_reset: Is this init path or reset path
8475 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
8476 * using connection open, then create the list of normal targets (nt)
8477 * from firmware DDBs. Based on the list of nt, set up session and connection
8478 * objects.
8480 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
8482 uint16_t tmo = 0;
8483 struct list_head list_st, list_nt;
8484 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
8485 unsigned long wtime;
8487 if (!test_bit(AF_LINK_UP, &ha->flags)) {
8488 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
8489 ha->is_reset = is_reset;
8490 return;
8493 INIT_LIST_HEAD(&list_st);
8494 INIT_LIST_HEAD(&list_nt);
8496 qla4xxx_build_st_list(ha, &list_st);
8498 /* Before issuing the conn open mbox, ensure all IP states are configured.
8499 * Note: conn open fails if the IPs are not configured. */
8501 qla4xxx_wait_for_ip_configuration(ha);
8503 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
8504 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
8505 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
8508 /* Wait for all sendtargets to complete; wait a minimum of 12 sec */
8509 tmo = ((ha->def_timeout > LOGIN_TOV) &&
8510 (ha->def_timeout < LOGIN_TOV * 10) ?
8511 ha->def_timeout : LOGIN_TOV);
8513 DEBUG2(ql4_printk(KERN_INFO, ha,
8514 "Default time to wait for build ddb %d\n", tmo));
8516 wtime = jiffies + (HZ * tmo);
8517 do {
8518 if (list_empty(&list_st))
8519 break;
8521 qla4xxx_remove_failed_ddb(ha, &list_st);
8522 schedule_timeout_uninterruptible(HZ / 10);
8523 } while (time_after(wtime, jiffies));
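/* With sendtargets discovery complete, build the normal target (nt) list
 * and set up session/connection objects for each entry. */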
8526 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
8528 qla4xxx_free_ddb_list(&list_st);
8529 qla4xxx_free_ddb_list(&list_nt);
8531 qla4xxx_free_ddb_index(ha);
8535 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
8536 * response.
8537 * @ha: pointer to adapter structure
8539 * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag will be
8540 * set in the DDB and we will wait for the login response of the boot targets
8541 * during probe.
8543 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
8545 struct ddb_entry *ddb_entry;
8546 struct dev_db_entry *fw_ddb_entry = NULL;
8547 dma_addr_t fw_ddb_entry_dma;
8548 unsigned long wtime;
8549 uint32_t ddb_state;
8550 int max_ddbs, idx, ret;
8552 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
8553 MAX_DEV_DB_ENTRIES;
8555 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8556 &fw_ddb_entry_dma, GFP_KERNEL);
8557 if (!fw_ddb_entry) {
8558 ql4_printk(KERN_ERR, ha,
8559 "%s: Unable to allocate dma buffer\n", __func__);
8560 goto exit_login_resp;
8563 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
8565 for (idx = 0; idx < max_ddbs; idx++) {
8566 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8567 if (ddb_entry == NULL)
8568 continue;
8570 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
8571 DEBUG2(ql4_printk(KERN_INFO, ha,
8572 "%s: DDB index [%d]\n", __func__,
8573 ddb_entry->fw_ddb_index));
8574 do {
8575 ret = qla4xxx_get_fwddb_entry(ha,
8576 ddb_entry->fw_ddb_index,
8577 fw_ddb_entry, fw_ddb_entry_dma,
8578 NULL, NULL, &ddb_state, NULL,
8579 NULL, NULL);
8580 if (ret == QLA_ERROR)
8581 goto exit_login_resp;
8583 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
8584 (ddb_state == DDB_DS_SESSION_FAILED))
8585 break;
8587 schedule_timeout_uninterruptible(HZ);
8589 } while ((time_after(wtime, jiffies)));
8591 if (!time_after(wtime, jiffies)) {
8592 DEBUG2(ql4_printk(KERN_INFO, ha,
8593 "%s: Login response wait timer expired\n",
8594 __func__));
8595 goto exit_login_resp;
8600 exit_login_resp:
8601 if (fw_ddb_entry)
8602 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8603 fw_ddb_entry, fw_ddb_entry_dma);
8607 * qla4xxx_probe_adapter - callback function to probe HBA
8608 * @pdev: pointer to pci_dev structure
8609 * @ent: pointer to the matching pci_device_id entry
8611 * This routine will probe for QLogic 4xxx iSCSI host adapters.
8612 * It returns zero if successful. It also initializes all data necessary for
8613 * the driver.
8615 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
8616 const struct pci_device_id *ent)
8618 int ret = -ENODEV, status;
8619 struct Scsi_Host *host;
8620 struct scsi_qla_host *ha;
8621 uint8_t init_retry_count = 0;
8622 char buf[34];
8623 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
8624 uint32_t dev_state;
8626 if (pci_enable_device(pdev))
8627 return -1;
8629 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
8630 if (host == NULL) {
8631 printk(KERN_WARNING
8632 "qla4xxx: Couldn't allocate host from scsi layer!\n");
8633 goto probe_disable_device;
8636 /* Clear our data area */
8637 ha = to_qla_host(host);
8638 memset(ha, 0, sizeof(*ha));
8640 /* Save the information from PCI BIOS. */
8641 ha->pdev = pdev;
8642 ha->host = host;
8643 ha->host_no = host->host_no;
8644 ha->func_num = PCI_FUNC(ha->pdev->devfn);
8646 pci_enable_pcie_error_reporting(pdev);
8648 /* Setup Runtime configurable options */
8649 if (is_qla8022(ha)) {
8650 ha->isp_ops = &qla4_82xx_isp_ops;
8651 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
8652 ha->qdr_sn_window = -1;
8653 ha->ddr_mn_window = -1;
8654 ha->curr_window = 255;
8655 nx_legacy_intr = &legacy_intr[ha->func_num];
8656 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
8657 ha->nx_legacy_intr.tgt_status_reg =
8658 nx_legacy_intr->tgt_status_reg;
8659 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
8660 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
8661 } else if (is_qla8032(ha) || is_qla8042(ha)) {
8662 ha->isp_ops = &qla4_83xx_isp_ops;
8663 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
8664 } else {
8665 ha->isp_ops = &qla4xxx_isp_ops;
8668 if (is_qla80XX(ha)) {
8669 rwlock_init(&ha->hw_lock);
8670 ha->pf_bit = ha->func_num << 16;
8671 /* Set EEH reset type to fundamental if required by hba */
8672 pdev->needs_freset = 1;
8675 /* Configure PCI I/O space. */
8676 ret = ha->isp_ops->iospace_config(ha);
8677 if (ret)
8678 goto probe_failed_ioconfig;
8680 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
8681 pdev->device, pdev->irq, ha->reg);
8683 qla4xxx_config_dma_addressing(ha);
8685 /* Initialize lists and spinlocks. */
8686 INIT_LIST_HEAD(&ha->free_srb_q);
8688 mutex_init(&ha->mbox_sem);
8689 mutex_init(&ha->chap_sem);
8690 init_completion(&ha->mbx_intr_comp);
8691 init_completion(&ha->disable_acb_comp);
8692 init_completion(&ha->idc_comp);
8693 init_completion(&ha->link_up_comp);
8696 spin_lock_init(&ha->hardware_lock);
8697 spin_lock_init(&ha->work_lock);
8699 /* Initialize work list */
8700 INIT_LIST_HEAD(&ha->work_list);
8702 /* Allocate dma buffers */
8703 if (qla4xxx_mem_alloc(ha)) {
8704 ql4_printk(KERN_WARNING, ha,
8705 "[ERROR] Failed to allocate memory for adapter\n");
8707 ret = -ENOMEM;
8708 goto probe_failed;
8711 host->cmd_per_lun = 3;
8712 host->max_channel = 0;
8713 host->max_lun = MAX_LUNS - 1;
8714 host->max_id = MAX_TARGETS;
8715 host->max_cmd_len = IOCB_MAX_CDB_LEN;
8716 host->can_queue = MAX_SRBS;
8717 host->transportt = qla4xxx_scsi_transport;
8719 pci_set_drvdata(pdev, ha);
8721 ret = scsi_add_host(host, &pdev->dev);
8722 if (ret)
8723 goto probe_failed;
8725 if (is_qla80XX(ha))
8726 qla4_8xxx_get_flash_info(ha);
8728 if (is_qla8032(ha) || is_qla8042(ha)) {
8729 qla4_83xx_read_reset_template(ha);
8731 /* NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
8732 * If DONTRESET_BIT0 is set, drivers should not set dev_state
8733 * to NEED_RESET. But if NEED_RESET is set, drivers should
8734 * honor the reset. */
8736 if (ql4xdontresethba == 1)
8737 qla4_83xx_set_idc_dontreset(ha);
8741 /* Initialize the host adapter request/response queues and
8742 * firmware.
8743 * NOTE: interrupts are enabled upon successful completion. */
8745 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8747 /* Don't retry adapter initialization if IRQ allocation failed */
8748 if (is_qla80XX(ha) && (status == QLA_ERROR))
8749 goto skip_retry_init;
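/* Retry adapter initialization, resetting the chip between attempts, until
 * the adapter comes online or MAX_INIT_RETRIES is reached. */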
8751 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
8752 init_retry_count++ < MAX_INIT_RETRIES) {
8754 if (is_qla80XX(ha)) {
8755 ha->isp_ops->idc_lock(ha);
8756 dev_state = qla4_8xxx_rd_direct(ha,
8757 QLA8XXX_CRB_DEV_STATE);
8758 ha->isp_ops->idc_unlock(ha);
8759 if (dev_state == QLA8XXX_DEV_FAILED) {
8760 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
8761 "adapter initialization. H/W is in failed state\n",
8762 __func__);
8763 break;
8766 DEBUG2(printk("scsi: %s: retrying adapter initialization "
8767 "(%d)\n", __func__, init_retry_count));
8769 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
8770 continue;
8772 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8773 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
8774 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
8775 goto skip_retry_init;
8779 skip_retry_init:
8780 if (!test_bit(AF_ONLINE, &ha->flags)) {
8781 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
8783 if ((is_qla8022(ha) && ql4xdontresethba) ||
8784 ((is_qla8032(ha) || is_qla8042(ha)) &&
8785 qla4_83xx_idc_dontreset(ha))) {
8786 /* Put the device in failed state. */
8787 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
8788 ha->isp_ops->idc_lock(ha);
8789 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8790 QLA8XXX_DEV_FAILED);
8791 ha->isp_ops->idc_unlock(ha);
8793 ret = -ENODEV;
8794 goto remove_host;
8797 /* Startup the kernel thread for this host adapter. */
8798 DEBUG2(printk("scsi: %s: Starting kernel thread for "
8799 "qla4xxx_dpc\n", __func__));
8800 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
8801 ha->dpc_thread = create_singlethread_workqueue(buf);
8802 if (!ha->dpc_thread) {
8803 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
8804 ret = -ENODEV;
8805 goto remove_host;
8807 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
8809 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
8810 ha->host_no);
8811 if (!ha->task_wq) {
8812 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
8813 ret = -ENODEV;
8814 goto remove_host;
8818 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
8819 * (which is called indirectly by qla4xxx_initialize_adapter),
8820 * so that irqs will be registered after crbinit but before
8821 * mbx_intr_enable.
8823 if (is_qla40XX(ha)) {
8824 ret = qla4xxx_request_irqs(ha);
8825 if (ret) {
8826 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
8827 "interrupt %d already in use.\n", pdev->irq);
8828 goto remove_host;
8832 pci_save_state(ha->pdev);
8833 ha->isp_ops->enable_intrs(ha);
8835 /* Start timer thread. */
8836 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
8838 set_bit(AF_INIT_DONE, &ha->flags);
8840 qla4_8xxx_alloc_sysfs_attr(ha);
8842 printk(KERN_INFO
8843 " QLogic iSCSI HBA Driver version: %s\n"
8844 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
8845 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
8846 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
8847 ha->fw_info.fw_patch, ha->fw_info.fw_build);
8849 /* Set the driver version */
8850 if (is_qla80XX(ha))
8851 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
8853 if (qla4xxx_setup_boot_info(ha))
8854 ql4_printk(KERN_ERR, ha,
8855 "%s: No iSCSI boot target configured\n", __func__);
8857 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
8858 /* Perform the build ddb list and login to each */
8859 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
8860 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
8861 qla4xxx_wait_login_resp_boot_tgt(ha);
8863 qla4xxx_create_chap_list(ha);
8865 qla4xxx_create_ifaces(ha);
8866 return 0;
8868 remove_host:
8869 scsi_remove_host(ha->host);
8871 probe_failed:
8872 qla4xxx_free_adapter(ha);
8874 probe_failed_ioconfig:
8875 pci_disable_pcie_error_reporting(pdev);
8876 scsi_host_put(ha->host);
8878 probe_disable_device:
8879 pci_disable_device(pdev);
8881 return ret;
8885 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
8886 * @ha: pointer to adapter structure
8888 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
8889 * so that the other port will not re-initialize while in the process of
8890 * removing the ha due to driver unload or hba hotplug.
8892 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
8894 struct scsi_qla_host *other_ha = NULL;
8895 struct pci_dev *other_pdev = NULL;
8896 int fn = ISP4XXX_PCI_FN_2;
8898 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
8899 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
8900 fn = ISP4XXX_PCI_FN_1;
8902 other_pdev =
8903 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8904 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8905 fn));
8907 /* Get other_ha if other_pdev is valid and its state is enabled */
8908 if (other_pdev) {
8909 if (atomic_read(&other_pdev->enable_cnt)) {
8910 other_ha = pci_get_drvdata(other_pdev);
8911 if (other_ha) {
8912 set_bit(AF_HA_REMOVAL, &other_ha->flags);
8913 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
8914 "Prevent %s reinit\n", __func__,
8915 dev_name(&other_ha->pdev->dev)));
8918 pci_dev_put(other_pdev);
8922 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
8923 struct ddb_entry *ddb_entry)
8925 struct dev_db_entry *fw_ddb_entry = NULL;
8926 dma_addr_t fw_ddb_entry_dma;
8927 unsigned long wtime;
8928 uint32_t ddb_state;
8929 int options;
8930 int status;
8932 options = LOGOUT_OPTION_CLOSE_SESSION;
8933 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
8934 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
8935 goto clear_ddb;
8938 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8939 &fw_ddb_entry_dma, GFP_KERNEL);
8940 if (!fw_ddb_entry) {
8941 ql4_printk(KERN_ERR, ha,
8942 "%s: Unable to allocate dma buffer\n", __func__);
8943 goto clear_ddb;
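/* Wait for the logout to complete before clearing the firmware DDB entry. */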
8946 wtime = jiffies + (HZ * LOGOUT_TOV);
8947 do {
8948 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
8949 fw_ddb_entry, fw_ddb_entry_dma,
8950 NULL, NULL, &ddb_state, NULL,
8951 NULL, NULL);
8952 if (status == QLA_ERROR)
8953 goto free_ddb;
8955 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
8956 (ddb_state == DDB_DS_SESSION_FAILED))
8957 goto free_ddb;
8959 schedule_timeout_uninterruptible(HZ);
8960 } while ((time_after(wtime, jiffies)));
8962 free_ddb:
8963 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8964 fw_ddb_entry, fw_ddb_entry_dma);
8965 clear_ddb:
8966 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
8969 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
8971 struct ddb_entry *ddb_entry;
8972 int idx;
8974 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
8976 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8977 if ((ddb_entry != NULL) &&
8978 (ddb_entry->ddb_type == FLASH_DDB)) {
8980 qla4xxx_destroy_ddb(ha, ddb_entry);
8982 /* We decremented the driver's module reference count when we set up
8983 * the session so that driver unload could proceed seamlessly without
8984 * actually destroying the session; take that reference back before
8985 * destroying the endpoint. */
8987 try_module_get(qla4xxx_iscsi_transport.owner);
8988 iscsi_destroy_endpoint(ddb_entry->conn->ep);
8989 qla4xxx_free_ddb(ha, ddb_entry);
8990 iscsi_session_teardown(ddb_entry->sess);
8995 * qla4xxx_remove_adapter - callback function to remove adapter.
8996 * @pci_dev: PCI device pointer
8998 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
9000 struct scsi_qla_host *ha;
9003 /* If the PCI device is disabled then probe_adapter failed and the
9004 * resources were already cleaned up on probe_adapter exit. */
9006 if (!pci_is_enabled(pdev))
9007 return;
9009 ha = pci_get_drvdata(pdev);
9011 if (is_qla40XX(ha))
9012 qla4xxx_prevent_other_port_reinit(ha);
9014 /* destroy iface from sysfs */
9015 qla4xxx_destroy_ifaces(ha);
9017 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
9018 iscsi_boot_destroy_kset(ha->boot_kset);
9020 qla4xxx_destroy_fw_ddb_session(ha);
9021 qla4_8xxx_free_sysfs_attr(ha);
9023 qla4xxx_sysfs_ddb_remove(ha);
9024 scsi_remove_host(ha->host);
9026 qla4xxx_free_adapter(ha);
9028 scsi_host_put(ha->host);
9030 pci_disable_pcie_error_reporting(pdev);
9031 pci_disable_device(pdev);
9035 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
9036 * @ha: HA context
9038 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
9039 * supported addressing method.
9041 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
9043 int retval;
9045 /* Update our PCI device dma_mask for full 64 bit mask */
9046 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
9047 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
9048 dev_dbg(&ha->pdev->dev,
9049 "Failed to set 64 bit PCI consistent mask; "
9050 "using 32 bit.\n");
9051 retval = pci_set_consistent_dma_mask(ha->pdev,
9052 DMA_BIT_MASK(32));
9054 } else
9055 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
9058 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9060 struct iscsi_cls_session *cls_sess;
9061 struct iscsi_session *sess;
9062 struct ddb_entry *ddb;
9063 int queue_depth = QL4_DEF_QDEPTH;
9065 cls_sess = starget_to_session(sdev->sdev_target);
9066 sess = cls_sess->dd_data;
9067 ddb = sess->dd_data;
9069 sdev->hostdata = ddb;
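/* Honor ql4xmaxqdepth when it is a non-zero value that fits in 16 bits;
 * otherwise use the default queue depth. */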
9071 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9072 queue_depth = ql4xmaxqdepth;
9074 scsi_change_queue_depth(sdev, queue_depth);
9075 return 0;
9079 * qla4xxx_del_from_active_array - returns an active srb
9080 * @ha: Pointer to host adapter structure.
9081 * @index: index into the active_array
9083 * This routine removes and returns the srb at the specified index
9085 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
9086 uint32_t index)
9088 struct srb *srb = NULL;
9089 struct scsi_cmnd *cmd = NULL;
9091 cmd = scsi_host_find_tag(ha->host, index);
9092 if (!cmd)
9093 return srb;
9095 srb = (struct srb *)CMD_SP(cmd);
9096 if (!srb)
9097 return srb;
9099 /* update counters */
9100 if (srb->flags & SRB_DMA_VALID) {
9101 ha->iocb_cnt -= srb->iocb_cnt;
9102 if (srb->cmd)
9103 srb->cmd->host_scribble =
9104 (unsigned char *)(unsigned long) MAX_SRBS;
9106 return srb;
9110 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
9111 * @ha: Pointer to host adapter structure.
9112 * @cmd: Scsi Command to wait on.
9114 * This routine waits for the command to be returned by the firmware
9115 * for up to a maximum wait time.
9117 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
9118 struct scsi_cmnd *cmd)
9120 int done = 0;
9121 struct srb *rp;
9122 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
9123 int ret = SUCCESS;
9125 /* Don't wait on the command if a PCI error is being handled
9126 * by the PCI AER driver. */
9128 if (unlikely(pci_channel_offline(ha->pdev)) ||
9129 (test_bit(AF_EEH_BUSY, &ha->flags))) {
9130 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
9131 ha->host_no, __func__);
9132 return ret;
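/* Poll every 2 seconds until the firmware returns the command to the OS or
 * the maximum wait time elapses. */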
9135 do {
9136 /* Check whether the command has been returned to the OS */
9137 rp = (struct srb *) CMD_SP(cmd);
9138 if (rp == NULL) {
9139 done++;
9140 break;
9143 msleep(2000);
9144 } while (max_wait_time--);
9146 return done;
9150 * qla4xxx_wait_for_hba_online - waits for HBA to come online
9151 * @ha: Pointer to host adapter structure
9153 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
9155 unsigned long wait_online;
9157 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
9158 while (time_before(jiffies, wait_online)) {
9160 if (adapter_up(ha))
9161 return QLA_SUCCESS;
9163 msleep(2000);
9166 return QLA_ERROR;
9170 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
9171 * @ha: pointer to HBA
9172 * @stgt: pointer to the SCSI target
9173 * @sdev: pointer to the SCSI device (may be NULL to cover the whole target)
9175 * This function waits for all outstanding commands to a lun to complete. It
9176 * returns 0 if all pending commands are returned and 1 otherwise.
9178 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
9179 struct scsi_target *stgt,
9180 struct scsi_device *sdev)
9182 int cnt;
9183 int status = 0;
9184 struct scsi_cmnd *cmd;
9187 /* Wait for all commands for the designated target or device
9188 * in the active array to complete. */
9190 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
9191 cmd = scsi_host_find_tag(ha->host, cnt);
9192 if (cmd && stgt == scsi_target(cmd->device) &&
9193 (!sdev || sdev == cmd->device)) {
9194 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9195 status++;
9196 break;
9200 return status;
9204 * qla4xxx_eh_abort - callback for abort task.
9205 * @cmd: Pointer to Linux's SCSI command structure
9207 * This routine is called by the Linux OS to abort the specified
9208 * command.
9210 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9212 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9213 unsigned int id = cmd->device->id;
9214 uint64_t lun = cmd->device->lun;
9215 unsigned long flags;
9216 struct srb *srb = NULL;
9217 int ret = SUCCESS;
9218 int wait = 0;
9219 int rval;
9221 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9222 ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9224 rval = qla4xxx_isp_check_reg(ha);
9225 if (rval != QLA_SUCCESS) {
9226 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9227 return FAILED;
9230 spin_lock_irqsave(&ha->hardware_lock, flags);
9231 srb = (struct srb *) CMD_SP(cmd);
9232 if (!srb) {
9233 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9234 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
9235 ha->host_no, id, lun);
9236 return SUCCESS;
9238 kref_get(&srb->srb_ref);
9239 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9241 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
9242 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
9243 ha->host_no, id, lun));
9244 ret = FAILED;
9245 } else {
9246 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
9247 ha->host_no, id, lun));
9248 wait = 1;
9251 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
9253 /* Wait for command to complete */
9254 if (wait) {
9255 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9256 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
9257 ha->host_no, id, lun));
9258 ret = FAILED;
9262 ql4_printk(KERN_INFO, ha,
9263 "scsi%ld:%d:%llu: Abort command - %s\n",
9264 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
9266 return ret;
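/*
 * Illustrative sketch (not part of the driver): the reference-pinning
 * pattern used by the abort handler above - take an extra kref while
 * holding the lock that protects the lookup, drop the lock, operate on
 * the object, then put the reference. struct example_obj and
 * example_release() are hypothetical stand-ins for the driver's srb
 * handling.
 */
struct example_obj {
	struct kref ref;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, ref));
}

static void example_pin_and_use(struct example_obj *obj, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	kref_get(&obj->ref);			/* pin while we use it */
	spin_unlock_irqrestore(lock, flags);

	/* ... operate on obj without holding the lock ... */

	kref_put(&obj->ref, example_release);	/* drop our extra reference */
}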
9270 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
9271 * @cmd: Pointer to Linux's SCSI command structure
9273 * This routine is called by the Linux OS to reset the LUN addressed by
9274 * the specified command.
9276 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9278 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9279 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9280 int ret = FAILED, stat;
9281 int rval;
9283 if (!ddb_entry)
9284 return ret;
9286 ret = iscsi_block_scsi_eh(cmd);
9287 if (ret)
9288 return ret;
9289 ret = FAILED;
9291 ql4_printk(KERN_INFO, ha,
9292 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
9293 cmd->device->channel, cmd->device->id, cmd->device->lun);
9295 DEBUG2(printk(KERN_INFO
9296 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
9297 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
9298 cmd, jiffies, cmd->request->timeout / HZ,
9299 ha->dpc_flags, cmd->result, cmd->allowed));
9301 rval = qla4xxx_isp_check_reg(ha);
9302 if (rval != QLA_SUCCESS) {
9303 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9304 return FAILED;
9307 /* FIXME: wait for hba to go online */
9308 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
9309 if (stat != QLA_SUCCESS) {
9310 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
9311 goto eh_dev_reset_done;
9314 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9315 cmd->device)) {
9316 ql4_printk(KERN_INFO, ha,
9317 "DEVICE RESET FAILED - waiting for "
9318 "commands.\n");
9319 goto eh_dev_reset_done;
9322 /* Send marker. */
9323 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9324 MM_LUN_RESET) != QLA_SUCCESS)
9325 goto eh_dev_reset_done;
9327 ql4_printk(KERN_INFO, ha,
9328 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
9329 ha->host_no, cmd->device->channel, cmd->device->id,
9330 cmd->device->lun);
9332 ret = SUCCESS;
9334 eh_dev_reset_done:
9336 return ret;
9340 * qla4xxx_eh_target_reset - callback for target reset.
9341 * @cmd: Pointer to Linux's SCSI command structure
9343 * This routine is called by the Linux OS to reset the target.
9345 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9347 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9348 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9349 int stat, ret;
9350 int rval;
9352 if (!ddb_entry)
9353 return FAILED;
9355 ret = iscsi_block_scsi_eh(cmd);
9356 if (ret)
9357 return ret;
9359 starget_printk(KERN_INFO, scsi_target(cmd->device),
9360 "WARM TARGET RESET ISSUED.\n");
9362 DEBUG2(printk(KERN_INFO
9363 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
9364 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
9365 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
9366 ha->dpc_flags, cmd->result, cmd->allowed));
9368 rval = qla4xxx_isp_check_reg(ha);
9369 if (rval != QLA_SUCCESS) {
9370 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9371 return FAILED;
9374 stat = qla4xxx_reset_target(ha, ddb_entry);
9375 if (stat != QLA_SUCCESS) {
9376 starget_printk(KERN_INFO, scsi_target(cmd->device),
9377 "WARM TARGET RESET FAILED.\n");
9378 return FAILED;
9381 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9382 NULL)) {
9383 starget_printk(KERN_INFO, scsi_target(cmd->device),
9384 "WARM TARGET DEVICE RESET FAILED - "
9385 "waiting for commands.\n");
9386 return FAILED;
9389 /* Send marker. */
9390 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9391 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
9392 starget_printk(KERN_INFO, scsi_target(cmd->device),
9393 "WARM TARGET DEVICE RESET FAILED - "
9394 "marker iocb failed.\n");
9395 return FAILED;
9398 starget_printk(KERN_INFO, scsi_target(cmd->device),
9399 "WARM TARGET RESET SUCCEEDED.\n");
9400 return SUCCESS;
9404 * qla4xxx_is_eh_active - check if error handler is running
9405 * @shost: Pointer to SCSI Host struct
9407 * This routine determines whether the host reset was requested from the
9408 * EH (error handler) path or from an application such as sg_reset.
9410 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
9412 if (shost->shost_state == SHOST_RECOVERY)
9413 return 1;
9414 return 0;
9418 * qla4xxx_eh_host_reset - kernel callback
9419 * @cmd: Pointer to Linux's SCSI command structure
9421 * This routine is invoked by the Linux kernel to perform fatal error
9422 * recovery on the specified adapter.
9424 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
9426 int return_status = FAILED;
9427 struct scsi_qla_host *ha;
9428 int rval;
9430 ha = to_qla_host(cmd->device->host);
9432 rval = qla4xxx_isp_check_reg(ha);
9433 if (rval != QLA_SUCCESS) {
9434 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9435 return FAILED;
9438 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
9439 qla4_83xx_set_idc_dontreset(ha);
9442 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
9443 * protocol drivers, we should not set device_state to NEED_RESET
9445 if (ql4xdontresethba ||
9446 ((is_qla8032(ha) || is_qla8042(ha)) &&
9447 qla4_83xx_idc_dontreset(ha))) {
9448 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
9449 ha->host_no, __func__));
9451 /* Clear outstanding srb in queues */
9452 if (qla4xxx_is_eh_active(cmd->device->host))
9453 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
9455 return FAILED;
9458 ql4_printk(KERN_INFO, ha,
9459 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
9460 cmd->device->channel, cmd->device->id, cmd->device->lun);
9462 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
9463 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
9464 "DEAD.\n", ha->host_no, cmd->device->channel,
9465 __func__));
9467 return FAILED;
9470 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9471 if (is_qla80XX(ha))
9472 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
9473 else
9474 set_bit(DPC_RESET_HA, &ha->dpc_flags);
9477 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
9478 return_status = SUCCESS;
9480 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
9481 return_status == FAILED ? "FAILED" : "SUCCEEDED");
9483 return return_status;
9486 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
9488 uint32_t mbox_cmd[MBOX_REG_COUNT];
9489 uint32_t mbox_sts[MBOX_REG_COUNT];
9490 struct addr_ctrl_blk_def *acb = NULL;
9491 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
9492 int rval = QLA_SUCCESS;
9493 dma_addr_t acb_dma;
9495 acb = dma_alloc_coherent(&ha->pdev->dev,
9496 sizeof(struct addr_ctrl_blk_def),
9497 &acb_dma, GFP_KERNEL);
9498 if (!acb) {
9499 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
9500 __func__);
9501 rval = -ENOMEM;
9502 goto exit_port_reset;
9505 memset(acb, 0, acb_len);
9507 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
9508 if (rval != QLA_SUCCESS) {
9509 rval = -EIO;
9510 goto exit_free_acb;
9513 rval = qla4xxx_disable_acb(ha);
9514 if (rval != QLA_SUCCESS) {
9515 rval = -EIO;
9516 goto exit_free_acb;
9519 wait_for_completion_timeout(&ha->disable_acb_comp,
9520 DISABLE_ACB_TOV * HZ);
9522 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
9523 if (rval != QLA_SUCCESS) {
9524 rval = -EIO;
9525 goto exit_free_acb;
9528 exit_free_acb:
9529 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
9530 acb, acb_dma);
9531 exit_port_reset:
9532 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
9533 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
9534 return rval;
9537 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
9539 struct scsi_qla_host *ha = to_qla_host(shost);
9540 int rval = QLA_SUCCESS;
9541 uint32_t idc_ctrl;
9543 if (ql4xdontresethba) {
9544 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
9545 __func__));
9546 rval = -EPERM;
9547 goto exit_host_reset;
9550 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
9551 goto recover_adapter;
9553 switch (reset_type) {
9554 case SCSI_ADAPTER_RESET:
9555 set_bit(DPC_RESET_HA, &ha->dpc_flags);
9556 break;
9557 case SCSI_FIRMWARE_RESET:
9558 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9559 if (is_qla80XX(ha))
9560 /* set firmware context reset */
9561 set_bit(DPC_RESET_HA_FW_CONTEXT,
9562 &ha->dpc_flags);
9563 else {
9564 rval = qla4xxx_context_reset(ha);
9565 goto exit_host_reset;
9568 break;
9571 recover_adapter:
9572 /* For ISP8324 and ISP8042, set the graceful reset bit in IDC_DRV_CTRL
9573 * if the reset is issued by an application */
9574 if ((is_qla8032(ha) || is_qla8042(ha)) &&
9575 test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9576 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
9577 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
9578 (idc_ctrl | GRACEFUL_RESET_BIT1));
9581 rval = qla4xxx_recover_adapter(ha);
9582 if (rval != QLA_SUCCESS) {
9583 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
9584 __func__));
9585 rval = -EIO;
9588 exit_host_reset:
9589 return rval;
9592 /* The PCI AER driver recovers from all correctable errors without
9593 * driver intervention. For uncorrectable errors the PCI AER driver
9594 * invokes the following device-driver callbacks:
9596 * - Fatal errors - link_reset
9597 * - Non-fatal errors - the driver's pci_error_detected(), which
9598 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
9600 * Depending on that return value, the PCI AER driver then calls:
9601 * CAN_RECOVER - the driver's pci_mmio_enabled(); mmio_enabled
9602 * returns RECOVERED, or NEED_RESET if the firmware is hung
9603 * NEED_RESET - the driver's slot_reset()
9604 * DISCONNECT - the device is dead and cannot be recovered
9605 * RECOVERED - the driver's pci_resume()
9607 static pci_ers_result_t
9608 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9610 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9612 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
9613 ha->host_no, __func__, state);
9615 if (!is_aer_supported(ha))
9616 return PCI_ERS_RESULT_NONE;
9618 switch (state) {
9619 case pci_channel_io_normal:
9620 clear_bit(AF_EEH_BUSY, &ha->flags);
9621 return PCI_ERS_RESULT_CAN_RECOVER;
9622 case pci_channel_io_frozen:
9623 set_bit(AF_EEH_BUSY, &ha->flags);
9624 qla4xxx_mailbox_premature_completion(ha);
9625 qla4xxx_free_irqs(ha);
9626 pci_disable_device(pdev);
9627 /* Return all outstanding I/Os */
9628 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
9629 return PCI_ERS_RESULT_NEED_RESET;
9630 case pci_channel_io_perm_failure:
9631 set_bit(AF_EEH_BUSY, &ha->flags);
9632 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
9633 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
9634 return PCI_ERS_RESULT_DISCONNECT;
9636 return PCI_ERS_RESULT_NEED_RESET;
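/*
 * Illustrative sketch (not part of the driver): a minimal skeleton of the
 * AER flow described in the comment above. The example_* names are
 * hypothetical; the real qla4xxx callbacks are the surrounding functions.
 * error_detected() asks the AER core for a slot reset, slot_reset()
 * reports whether recovery worked, and resume() restarts I/O.
 */
static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;  /* device is gone */
	return PCI_ERS_RESULT_NEED_RESET;          /* ask AER to call slot_reset() */
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;           /* AER will call resume() next */
}

static void example_resume(struct pci_dev *pdev)
{
	/* restart I/O; qla4xxx waits for the HBA to come back online here */
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	.slot_reset	= example_slot_reset,
	.resume		= example_resume,
};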
9640 * qla4xxx_pci_mmio_enabled() gets called if
9641 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
9642 * and read/write to the device still works.
9644 static pci_ers_result_t
9645 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
9647 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9649 if (!is_aer_supported(ha))
9650 return PCI_ERS_RESULT_NONE;
9652 return PCI_ERS_RESULT_RECOVERED;
9655 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
9657 uint32_t rval = QLA_ERROR;
9658 int fn;
9659 struct pci_dev *other_pdev = NULL;
9661 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
9663 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9665 if (test_bit(AF_ONLINE, &ha->flags)) {
9666 clear_bit(AF_ONLINE, &ha->flags);
9667 clear_bit(AF_LINK_UP, &ha->flags);
9668 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
9669 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
9672 fn = PCI_FUNC(ha->pdev->devfn);
9673 if (is_qla8022(ha)) {
9674 while (fn > 0) {
9675 fn--;
9676 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
9677 ha->host_no, __func__, fn);
9678 /* Get the pci device given the domain, bus,
9679 * slot/function number */
9680 other_pdev = pci_get_domain_bus_and_slot(
9681 pci_domain_nr(ha->pdev->bus),
9682 ha->pdev->bus->number,
9683 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
9684 fn));
9686 if (!other_pdev)
9687 continue;
9689 if (atomic_read(&other_pdev->enable_cnt)) {
9690 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
9691 ha->host_no, __func__, fn);
9692 pci_dev_put(other_pdev);
9693 break;
9695 pci_dev_put(other_pdev);
9697 } else {
9698 /* this case is meant for ISP83xx/ISP84xx only */
9699 if (qla4_83xx_can_perform_reset(ha)) {
9700 /* reset fn as iSCSI is going to perform the reset */
9701 fn = 0;
9705 /* The first function on the card, the reset owner, will
9706 * start and initialize the firmware. The other functions
9707 * on the card will only reset their firmware context.
9709 if (!fn) {
9710 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
9711 "0x%x is the owner\n", ha->host_no, __func__,
9712 ha->pdev->devfn);
9714 ha->isp_ops->idc_lock(ha);
9715 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9716 QLA8XXX_DEV_COLD);
9717 ha->isp_ops->idc_unlock(ha);
9719 rval = qla4_8xxx_update_idc_reg(ha);
9720 if (rval == QLA_ERROR) {
9721 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
9722 ha->host_no, __func__);
9723 ha->isp_ops->idc_lock(ha);
9724 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9725 QLA8XXX_DEV_FAILED);
9726 ha->isp_ops->idc_unlock(ha);
9727 goto exit_error_recovery;
9730 clear_bit(AF_FW_RECOVERY, &ha->flags);
9731 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9733 if (rval != QLA_SUCCESS) {
9734 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9735 "FAILED\n", ha->host_no, __func__);
9736 qla4xxx_free_irqs(ha);
9737 ha->isp_ops->idc_lock(ha);
9738 qla4_8xxx_clear_drv_active(ha);
9739 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9740 QLA8XXX_DEV_FAILED);
9741 ha->isp_ops->idc_unlock(ha);
9742 } else {
9743 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9744 "READY\n", ha->host_no, __func__);
9745 ha->isp_ops->idc_lock(ha);
9746 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9747 QLA8XXX_DEV_READY);
9748 /* Clear driver state register */
9749 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
9750 qla4_8xxx_set_drv_active(ha);
9751 ha->isp_ops->idc_unlock(ha);
9752 ha->isp_ops->enable_intrs(ha);
9754 } else {
9755 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
9756 "the reset owner\n", ha->host_no, __func__,
9757 ha->pdev->devfn);
9758 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
9759 QLA8XXX_DEV_READY)) {
9760 clear_bit(AF_FW_RECOVERY, &ha->flags);
9761 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9762 if (rval == QLA_SUCCESS)
9763 ha->isp_ops->enable_intrs(ha);
9764 else
9765 qla4xxx_free_irqs(ha);
9767 ha->isp_ops->idc_lock(ha);
9768 qla4_8xxx_set_drv_active(ha);
9769 ha->isp_ops->idc_unlock(ha);
9772 exit_error_recovery:
9773 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9774 return rval;
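/*
 * Illustrative sketch (not part of the driver): the ISP8022 reset-owner
 * check mirrored from the routine above - walk the lower-numbered PCI
 * functions on the same slot and treat this function as the reset owner
 * when the scan ends at function 0.
 */
static bool example_is_reset_owner(struct pci_dev *pdev)
{
	struct pci_dev *other;
	int fn = PCI_FUNC(pdev->devfn);

	while (fn > 0) {
		fn--;
		other = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
						    pdev->bus->number,
						    PCI_DEVFN(PCI_SLOT(pdev->devfn), fn));
		if (!other)
			continue;
		if (atomic_read(&other->enable_cnt)) {
			pci_dev_put(other);
			break;		/* found an enabled lower function */
		}
		pci_dev_put(other);
	}
	return fn == 0;
}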
9777 static pci_ers_result_t
9778 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
9780 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
9781 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9782 int rc;
9784 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
9785 ha->host_no, __func__);
9787 if (!is_aer_supported(ha))
9788 return PCI_ERS_RESULT_NONE;
9790 /* Restore the saved state of the PCIe device -
9791 * BAR registers, PCI config space, PCI-X, MSI,
9792 * and IOV states
9794 pci_restore_state(pdev);
9796 /* pci_restore_state() clears the device's saved_state flag, so
9797 * save the state again to set the flag back
9799 pci_save_state(pdev);
9801 /* Initialize device or resume if in suspended state */
9802 rc = pci_enable_device(pdev);
9803 if (rc) {
9804 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
9805 "device after reset\n", ha->host_no, __func__);
9806 goto exit_slot_reset;
9809 ha->isp_ops->disable_intrs(ha);
9811 if (is_qla80XX(ha)) {
9812 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
9813 ret = PCI_ERS_RESULT_RECOVERED;
9814 goto exit_slot_reset;
9815 } else
9816 goto exit_slot_reset;
9819 exit_slot_reset:
9820 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
9821 ha->host_no, __func__, ret);
9822 return ret;
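/*
 * Illustrative sketch (not part of the driver): the canonical re-init
 * sequence performed in slot_reset() above - restore config space, save
 * it again so the saved_state flag is set once more, then re-enable the
 * device. Only standard PCI core calls are used.
 */
static int example_reinit_after_slot_reset(struct pci_dev *pdev)
{
	pci_restore_state(pdev);	/* write BARs/config space back to the device */
	pci_save_state(pdev);		/* pci_restore_state() cleared saved_state */
	return pci_enable_device(pdev);	/* 0 on success */
}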
9825 static void
9826 qla4xxx_pci_resume(struct pci_dev *pdev)
9828 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9829 int ret;
9831 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
9832 ha->host_no, __func__);
9834 ret = qla4xxx_wait_for_hba_online(ha);
9835 if (ret != QLA_SUCCESS) {
9836 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
9837 "resume I/O from slot/link_reset\n", ha->host_no,
9838 __func__);
9841 pci_cleanup_aer_uncorrect_error_status(pdev);
9842 clear_bit(AF_EEH_BUSY, &ha->flags);
9845 static const struct pci_error_handlers qla4xxx_err_handler = {
9846 .error_detected = qla4xxx_pci_error_detected,
9847 .mmio_enabled = qla4xxx_pci_mmio_enabled,
9848 .slot_reset = qla4xxx_pci_slot_reset,
9849 .resume = qla4xxx_pci_resume,
9852 static struct pci_device_id qla4xxx_pci_tbl[] = {
9854 .vendor = PCI_VENDOR_ID_QLOGIC,
9855 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
9856 .subvendor = PCI_ANY_ID,
9857 .subdevice = PCI_ANY_ID,
9860 .vendor = PCI_VENDOR_ID_QLOGIC,
9861 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
9862 .subvendor = PCI_ANY_ID,
9863 .subdevice = PCI_ANY_ID,
9866 .vendor = PCI_VENDOR_ID_QLOGIC,
9867 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
9868 .subvendor = PCI_ANY_ID,
9869 .subdevice = PCI_ANY_ID,
9872 .vendor = PCI_VENDOR_ID_QLOGIC,
9873 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
9874 .subvendor = PCI_ANY_ID,
9875 .subdevice = PCI_ANY_ID,
9878 .vendor = PCI_VENDOR_ID_QLOGIC,
9879 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
9880 .subvendor = PCI_ANY_ID,
9881 .subdevice = PCI_ANY_ID,
9884 .vendor = PCI_VENDOR_ID_QLOGIC,
9885 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
9886 .subvendor = PCI_ANY_ID,
9887 .subdevice = PCI_ANY_ID,
9889 {0, 0},
9891 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
9893 static struct pci_driver qla4xxx_pci_driver = {
9894 .name = DRIVER_NAME,
9895 .id_table = qla4xxx_pci_tbl,
9896 .probe = qla4xxx_probe_adapter,
9897 .remove = qla4xxx_remove_adapter,
9898 .err_handler = &qla4xxx_err_handler,
9901 static int __init qla4xxx_module_init(void)
9903 int ret;
9905 if (ql4xqfulltracking)
9906 qla4xxx_driver_template.track_queue_depth = 1;
9908 /* Allocate cache for SRBs. */
9909 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
9910 SLAB_HWCACHE_ALIGN, NULL);
9911 if (srb_cachep == NULL) {
9912 printk(KERN_ERR
9913 "%s: Unable to allocate SRB cache..."
9914 "Failing load!\n", DRIVER_NAME);
9915 ret = -ENOMEM;
9916 goto no_srp_cache;
9919 /* Derive version string. */
9920 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
9921 if (ql4xextended_error_logging)
9922 strcat(qla4xxx_version_str, "-debug");
9924 qla4xxx_scsi_transport =
9925 iscsi_register_transport(&qla4xxx_iscsi_transport);
9926 if (!qla4xxx_scsi_transport){
9927 ret = -ENODEV;
9928 goto release_srb_cache;
9931 ret = pci_register_driver(&qla4xxx_pci_driver);
9932 if (ret)
9933 goto unregister_transport;
9935 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
9936 return 0;
9938 unregister_transport:
9939 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9940 release_srb_cache:
9941 kmem_cache_destroy(srb_cachep);
9942 no_srp_cache:
9943 return ret;
9946 static void __exit qla4xxx_module_exit(void)
9948 pci_unregister_driver(&qla4xxx_pci_driver);
9949 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9950 kmem_cache_destroy(srb_cachep);
9953 module_init(qla4xxx_module_init);
9954 module_exit(qla4xxx_module_exit);
9956 MODULE_AUTHOR("QLogic Corporation");
9957 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
9958 MODULE_LICENSE("GPL");
9959 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);