[linux-2.6/next.git] drivers/s390/scsi/zfcp_fsf.c (linux-next 20110831)
1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2010
7 */
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
21 struct kmem_cache *zfcp_fsf_qtcb_cache;
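/* FSF request timed out: log via SIOSL and reopen the adapter through ERP */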
23 static void zfcp_fsf_request_timeout_handler(unsigned long data)
25 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26 zfcp_qdio_siosl(adapter);
27 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
28 "fsrth_1");
31 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32 unsigned long timeout)
34 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36 fsf_req->timer.expires = jiffies + timeout;
37 add_timer(&fsf_req->timer);
40 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
42 BUG_ON(!fsf_req->erp_action);
43 fsf_req->timer.function = zfcp_erp_timeout_handler;
44 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45 fsf_req->timer.expires = jiffies + 30 * HZ;
46 add_timer(&fsf_req->timer);
49 /* association between FSF command and FSF QTCB type */
50 static u32 fsf_qtcb_type[] = {
51 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
52 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
59 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
60 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
61 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
62 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
63 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69 "operational because of an unsupported FC class\n");
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
74 /**
75 * zfcp_fsf_req_free - free memory used by fsf request
76 * @req: pointer to struct zfcp_fsf_req
77 */
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
80 if (likely(req->pool)) {
81 if (likely(req->qtcb))
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83 mempool_free(req, req->pool);
84 return;
87 if (likely(req->qtcb))
88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89 kfree(req);
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
94 unsigned long flags;
95 struct fsf_status_read_buffer *sr_buf = req->data;
96 struct zfcp_adapter *adapter = req->adapter;
97 struct zfcp_port *port;
98 int d_id = ntoh24(sr_buf->d_id);
100 read_lock_irqsave(&adapter->port_list_lock, flags);
101 list_for_each_entry(port, &adapter->port_list, list)
102 if (port->d_id == d_id) {
103 zfcp_erp_port_reopen(port, 0, "fssrpc1");
104 break;
106 read_unlock_irqrestore(&adapter->port_list_lock, flags);
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
110 struct fsf_link_down_info *link_down)
112 struct zfcp_adapter *adapter = req->adapter;
114 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
115 return;
117 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
119 zfcp_scsi_schedule_rports_block(adapter);
121 if (!link_down)
122 goto out;
124 switch (link_down->error_code) {
125 case FSF_PSQ_LINK_NO_LIGHT:
126 dev_warn(&req->adapter->ccw_device->dev,
127 "There is no light signal from the local "
128 "fibre channel cable\n");
129 break;
130 case FSF_PSQ_LINK_WRAP_PLUG:
131 dev_warn(&req->adapter->ccw_device->dev,
132 "There is a wrap plug instead of a fibre "
133 "channel cable\n");
134 break;
135 case FSF_PSQ_LINK_NO_FCP:
136 dev_warn(&req->adapter->ccw_device->dev,
137 "The adjacent fibre channel node does not "
138 "support FCP\n");
139 break;
140 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
141 dev_warn(&req->adapter->ccw_device->dev,
142 "The FCP device is suspended because of a "
143 "firmware update\n");
144 break;
145 case FSF_PSQ_LINK_INVALID_WWPN:
146 dev_warn(&req->adapter->ccw_device->dev,
147 "The FCP device detected a WWPN that is "
148 "duplicate or not valid\n");
149 break;
150 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
151 dev_warn(&req->adapter->ccw_device->dev,
152 "The fibre channel fabric does not support NPIV\n");
153 break;
154 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
155 dev_warn(&req->adapter->ccw_device->dev,
156 "The FCP adapter cannot support more NPIV ports\n");
157 break;
158 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
159 dev_warn(&req->adapter->ccw_device->dev,
160 "The adjacent switch cannot support "
161 "more NPIV ports\n");
162 break;
163 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
164 dev_warn(&req->adapter->ccw_device->dev,
165 "The FCP adapter could not log in to the "
166 "fibre channel fabric\n");
167 break;
168 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
169 dev_warn(&req->adapter->ccw_device->dev,
170 "The WWPN assignment file on the FCP adapter "
171 "has been damaged\n");
172 break;
173 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
174 dev_warn(&req->adapter->ccw_device->dev,
175 "The mode table on the FCP adapter "
176 "has been damaged\n");
177 break;
178 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
179 dev_warn(&req->adapter->ccw_device->dev,
180 "All NPIV ports on the FCP adapter have "
181 "been assigned\n");
182 break;
183 default:
184 dev_warn(&req->adapter->ccw_device->dev,
185 "The link between the FCP adapter and "
186 "the FC fabric is down\n");
188 out:
189 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
194 struct fsf_status_read_buffer *sr_buf = req->data;
195 struct fsf_link_down_info *ldi =
196 (struct fsf_link_down_info *) &sr_buf->payload;
198 switch (sr_buf->status_subtype) {
199 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
200 zfcp_fsf_link_down_info_eval(req, ldi);
201 break;
202 case FSF_STATUS_READ_SUB_FDISC_FAILED:
203 zfcp_fsf_link_down_info_eval(req, ldi);
204 break;
205 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
206 zfcp_fsf_link_down_info_eval(req, NULL);
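/* handle an unsolicited status buffer, then schedule new status read requests */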
210 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
212 struct zfcp_adapter *adapter = req->adapter;
213 struct fsf_status_read_buffer *sr_buf = req->data;
215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
216 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
218 zfcp_fsf_req_free(req);
219 return;
222 zfcp_dbf_hba_fsf_uss("fssrh_2", req);
224 switch (sr_buf->status_type) {
225 case FSF_STATUS_READ_PORT_CLOSED:
226 zfcp_fsf_status_read_port_closed(req);
227 break;
228 case FSF_STATUS_READ_INCOMING_ELS:
229 zfcp_fc_incoming_els(req);
230 break;
231 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
232 break;
233 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
234 dev_warn(&adapter->ccw_device->dev,
235 "The error threshold for checksum statistics "
236 "has been exceeded\n");
237 zfcp_dbf_hba_bit_err("fssrh_3", req);
238 break;
239 case FSF_STATUS_READ_LINK_DOWN:
240 zfcp_fsf_status_read_link_down(req);
241 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
242 break;
243 case FSF_STATUS_READ_LINK_UP:
244 dev_info(&adapter->ccw_device->dev,
245 "The local link has been restored\n");
246 /* All ports should be marked as ready to run again */
247 zfcp_erp_set_adapter_status(adapter,
248 ZFCP_STATUS_COMMON_RUNNING);
249 zfcp_erp_adapter_reopen(adapter,
250 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
251 ZFCP_STATUS_COMMON_ERP_FAILED,
252 "fssrh_2");
253 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
255 break;
256 case FSF_STATUS_READ_NOTIFICATION_LOST:
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
258 zfcp_cfdc_adapter_access_changed(adapter);
259 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
260 queue_work(adapter->work_queue, &adapter->scan_work);
261 break;
262 case FSF_STATUS_READ_CFDC_UPDATED:
263 zfcp_cfdc_adapter_access_changed(adapter);
264 break;
265 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
266 adapter->adapter_features = sr_buf->payload.word[0];
267 break;
270 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
271 zfcp_fsf_req_free(req);
273 atomic_inc(&adapter->stat_miss);
274 queue_work(adapter->work_queue, &adapter->stat_work);
277 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
279 switch (req->qtcb->header.fsf_status_qual.word[0]) {
280 case FSF_SQ_FCP_RSP_AVAILABLE:
281 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
282 case FSF_SQ_NO_RETRY_POSSIBLE:
283 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
284 return;
285 case FSF_SQ_COMMAND_ABORTED:
286 break;
287 case FSF_SQ_NO_RECOM:
288 dev_err(&req->adapter->ccw_device->dev,
289 "The FCP adapter reported a problem "
290 "that cannot be recovered\n");
291 zfcp_qdio_siosl(req->adapter);
292 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
293 break;
295 /* all non-return status qualifiers set FSFREQ_ERROR */
296 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
299 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
301 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
302 return;
304 switch (req->qtcb->header.fsf_status) {
305 case FSF_UNKNOWN_COMMAND:
306 dev_err(&req->adapter->ccw_device->dev,
307 "The FCP adapter does not recognize the command 0x%x\n",
308 req->qtcb->header.fsf_command);
309 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
310 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
311 break;
312 case FSF_ADAPTER_STATUS_AVAILABLE:
313 zfcp_fsf_fsfstatus_qual_eval(req);
314 break;
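/* evaluate the QTCB protocol status and escalate protocol errors to ERP */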
318 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
320 struct zfcp_adapter *adapter = req->adapter;
321 struct fsf_qtcb *qtcb = req->qtcb;
322 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
324 zfcp_dbf_hba_fsf_response(req);
326 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
327 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
328 return;
331 switch (qtcb->prefix.prot_status) {
332 case FSF_PROT_GOOD:
333 case FSF_PROT_FSF_STATUS_PRESENTED:
334 return;
335 case FSF_PROT_QTCB_VERSION_ERROR:
336 dev_err(&adapter->ccw_device->dev,
337 "QTCB version 0x%x not supported by FCP adapter "
338 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
339 psq->word[0], psq->word[1]);
340 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
341 break;
342 case FSF_PROT_ERROR_STATE:
343 case FSF_PROT_SEQ_NUMB_ERROR:
344 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
345 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
346 break;
347 case FSF_PROT_UNSUPP_QTCB_TYPE:
348 dev_err(&adapter->ccw_device->dev,
349 "The QTCB type is not supported by the FCP adapter\n");
350 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
351 break;
352 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
353 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
354 &adapter->status);
355 break;
356 case FSF_PROT_DUPLICATE_REQUEST_ID:
357 dev_err(&adapter->ccw_device->dev,
358 "0x%Lx is an ambiguous request identifier\n",
359 (unsigned long long)qtcb->bottom.support.req_handle);
360 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
361 break;
362 case FSF_PROT_LINK_DOWN:
363 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
364 /* go through reopen to flush pending requests */
365 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
366 break;
367 case FSF_PROT_REEST_QUEUE:
368 /* All ports should be marked as ready to run again */
369 zfcp_erp_set_adapter_status(adapter,
370 ZFCP_STATUS_COMMON_RUNNING);
371 zfcp_erp_adapter_reopen(adapter,
372 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
373 ZFCP_STATUS_COMMON_ERP_FAILED,
374 "fspse_8");
375 break;
376 default:
377 dev_err(&adapter->ccw_device->dev,
378 "0x%x is not a valid transfer protocol status\n",
379 qtcb->prefix.prot_status);
380 zfcp_qdio_siosl(adapter);
381 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
383 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
386 /**
387 * zfcp_fsf_req_complete - process completion of a FSF request
388 * @req: The FSF request that has been completed.
389 *
390 * When a request has been completed either from the FCP adapter,
391 * or it has been dismissed due to a queue shutdown, this function
392 * is called to process the completion status and trigger further
393 * events related to the FSF request.
394 */
395 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
397 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
398 zfcp_fsf_status_read_handler(req);
399 return;
402 del_timer(&req->timer);
403 zfcp_fsf_protstatus_eval(req);
404 zfcp_fsf_fsfstatus_eval(req);
405 req->handler(req);
407 if (req->erp_action)
408 zfcp_erp_notify(req->erp_action, 0);
410 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
411 zfcp_fsf_req_free(req);
412 else
413 complete(&req->completion);
416 /**
417 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
418 * @adapter: pointer to struct zfcp_adapter
419 *
420 * Never ever call this without shutting down the adapter first.
421 * Otherwise the adapter would continue using and corrupting s390 storage.
422 * A BUG_ON() call is included to ensure this is done.
423 * ERP is supposed to be the only user of this function.
424 */
425 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
427 struct zfcp_fsf_req *req, *tmp;
428 LIST_HEAD(remove_queue);
430 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
431 zfcp_reqlist_move(adapter->req_list, &remove_queue);
433 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
434 list_del(&req->list);
435 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
436 zfcp_fsf_req_complete(req);
440 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
442 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
443 struct zfcp_adapter *adapter = req->adapter;
444 struct Scsi_Host *shost = adapter->scsi_host;
445 struct fc_els_flogi *nsp, *plogi;
447 /* adjust pointers for missing command code */
448 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
449 - sizeof(u32));
450 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
451 - sizeof(u32));
453 if (req->data)
454 memcpy(req->data, bottom, sizeof(*bottom));
456 fc_host_port_name(shost) = nsp->fl_wwpn;
457 fc_host_node_name(shost) = nsp->fl_wwnn;
458 fc_host_port_id(shost) = ntoh24(bottom->s_id);
459 fc_host_speed(shost) = bottom->fc_link_speed;
460 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
462 adapter->hydra_version = bottom->adapter_type;
463 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
464 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
465 (u16)FSF_STATUS_READS_RECOM);
467 if (fc_host_permanent_port_name(shost) == -1)
468 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
470 switch (bottom->fc_topology) {
471 case FSF_TOPO_P2P:
472 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
473 adapter->peer_wwpn = plogi->fl_wwpn;
474 adapter->peer_wwnn = plogi->fl_wwnn;
475 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
476 break;
477 case FSF_TOPO_FABRIC:
478 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
479 break;
480 case FSF_TOPO_AL:
481 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
482 /* fall through */
483 default:
484 dev_err(&adapter->ccw_device->dev,
485 "Unknown or unsupported arbitrated loop "
486 "fibre channel topology detected\n");
487 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
488 return -EIO;
491 zfcp_scsi_set_prot(adapter);
493 return 0;
496 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
498 struct zfcp_adapter *adapter = req->adapter;
499 struct fsf_qtcb *qtcb = req->qtcb;
500 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
501 struct Scsi_Host *shost = adapter->scsi_host;
503 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
504 return;
506 adapter->fsf_lic_version = bottom->lic_version;
507 adapter->adapter_features = bottom->adapter_features;
508 adapter->connection_features = bottom->connection_features;
509 adapter->peer_wwpn = 0;
510 adapter->peer_wwnn = 0;
511 adapter->peer_d_id = 0;
513 switch (qtcb->header.fsf_status) {
514 case FSF_GOOD:
515 if (zfcp_fsf_exchange_config_evaluate(req))
516 return;
518 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
519 dev_err(&adapter->ccw_device->dev,
520 "FCP adapter maximum QTCB size (%d bytes) "
521 "is too small\n",
522 bottom->max_qtcb_size);
523 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
524 return;
526 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
527 &adapter->status);
528 break;
529 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
530 fc_host_node_name(shost) = 0;
531 fc_host_port_name(shost) = 0;
532 fc_host_port_id(shost) = 0;
533 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
534 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
535 adapter->hydra_version = 0;
537 zfcp_fsf_link_down_info_eval(req,
538 &qtcb->header.fsf_status_qual.link_down_info);
539 break;
540 default:
541 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
542 return;
545 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
546 adapter->hardware_version = bottom->hardware_version;
547 memcpy(fc_host_serial_number(shost), bottom->serial_number,
548 min(FC_SERIAL_NUMBER_SIZE, 17));
549 EBCASC(fc_host_serial_number(shost),
550 min(FC_SERIAL_NUMBER_SIZE, 17));
553 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
554 dev_err(&adapter->ccw_device->dev,
555 "The FCP adapter only supports newer "
556 "control block versions\n");
557 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
558 return;
560 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
561 dev_err(&adapter->ccw_device->dev,
562 "The FCP adapter only supports older "
563 "control block versions\n");
564 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
568 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
570 struct zfcp_adapter *adapter = req->adapter;
571 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
572 struct Scsi_Host *shost = adapter->scsi_host;
574 if (req->data)
575 memcpy(req->data, bottom, sizeof(*bottom));
577 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
578 fc_host_permanent_port_name(shost) = bottom->wwpn;
579 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
580 } else
581 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
582 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
583 fc_host_supported_speeds(shost) = bottom->supported_speed;
584 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
585 FC_FC4_LIST_SIZE);
586 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
587 FC_FC4_LIST_SIZE);
590 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
592 struct fsf_qtcb *qtcb = req->qtcb;
594 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
595 return;
597 switch (qtcb->header.fsf_status) {
598 case FSF_GOOD:
599 zfcp_fsf_exchange_port_evaluate(req);
600 break;
601 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
602 zfcp_fsf_exchange_port_evaluate(req);
603 zfcp_fsf_link_down_info_eval(req,
604 &qtcb->header.fsf_status_qual.link_down_info);
605 break;
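/* allocate an FSF request from the mempool, or with kmalloc if no pool is given */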
609 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
611 struct zfcp_fsf_req *req;
613 if (likely(pool))
614 req = mempool_alloc(pool, GFP_ATOMIC);
615 else
616 req = kmalloc(sizeof(*req), GFP_ATOMIC);
618 if (unlikely(!req))
619 return NULL;
621 memset(req, 0, sizeof(*req));
622 req->pool = pool;
623 return req;
626 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
628 struct fsf_qtcb *qtcb;
630 if (likely(pool))
631 qtcb = mempool_alloc(pool, GFP_ATOMIC);
632 else
633 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
635 if (unlikely(!qtcb))
636 return NULL;
638 memset(qtcb, 0, sizeof(*qtcb));
639 return qtcb;
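/* allocate and initialize an FSF request including its QTCB and QDIO request */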
642 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
643 u32 fsf_cmd, u8 sbtype,
644 mempool_t *pool)
646 struct zfcp_adapter *adapter = qdio->adapter;
647 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
649 if (unlikely(!req))
650 return ERR_PTR(-ENOMEM);
652 if (adapter->req_no == 0)
653 adapter->req_no++;
655 INIT_LIST_HEAD(&req->list);
656 init_timer(&req->timer);
657 init_completion(&req->completion);
659 req->adapter = adapter;
660 req->fsf_command = fsf_cmd;
661 req->req_id = adapter->req_no;
663 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
664 if (likely(pool))
665 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
666 else
667 req->qtcb = zfcp_qtcb_alloc(NULL);
669 if (unlikely(!req->qtcb)) {
670 zfcp_fsf_req_free(req);
671 return ERR_PTR(-ENOMEM);
674 req->seq_no = adapter->fsf_req_seq_no;
675 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
676 req->qtcb->prefix.req_id = req->req_id;
677 req->qtcb->prefix.ulp_info = 26;
678 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
679 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
680 req->qtcb->header.req_handle = req->req_id;
681 req->qtcb->header.fsf_command = req->fsf_command;
684 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
685 req->qtcb, sizeof(struct fsf_qtcb));
687 return req;
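/* register the request in the adapter request list and pass it to QDIO;
 * on send failure remove it again and reopen the adapter
 */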
690 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
692 struct zfcp_adapter *adapter = req->adapter;
693 struct zfcp_qdio *qdio = adapter->qdio;
694 int with_qtcb = (req->qtcb != NULL);
695 int req_id = req->req_id;
697 zfcp_reqlist_add(adapter->req_list, req);
699 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
700 req->issued = get_clock();
701 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
702 del_timer(&req->timer);
703 /* lookup request again, list might have changed */
704 zfcp_reqlist_find_rm(adapter->req_list, req_id);
705 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
706 return -EIO;
709 /* Don't increase for unsolicited status */
710 if (with_qtcb)
711 adapter->fsf_req_seq_no++;
712 adapter->req_no++;
714 return 0;
717 /**
718 * zfcp_fsf_status_read - send status read request
719 * @qdio: pointer to struct zfcp_qdio
720 * Returns: 0 on success, ERROR otherwise
721 */
723 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
725 struct zfcp_adapter *adapter = qdio->adapter;
726 struct zfcp_fsf_req *req;
727 struct fsf_status_read_buffer *sr_buf;
728 struct page *page;
729 int retval = -EIO;
731 spin_lock_irq(&qdio->req_q_lock);
732 if (zfcp_qdio_sbal_get(qdio))
733 goto out;
735 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
736 adapter->pool.status_read_req);
737 if (IS_ERR(req)) {
738 retval = PTR_ERR(req);
739 goto out;
742 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
743 if (!page) {
744 retval = -ENOMEM;
745 goto failed_buf;
747 sr_buf = page_address(page);
748 memset(sr_buf, 0, sizeof(*sr_buf));
749 req->data = sr_buf;
751 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
752 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
754 retval = zfcp_fsf_req_send(req);
755 if (retval)
756 goto failed_req_send;
758 goto out;
760 failed_req_send:
761 req->data = NULL;
762 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
763 failed_buf:
764 zfcp_dbf_hba_fsf_uss("fssr__1", req);
765 zfcp_fsf_req_free(req);
766 out:
767 spin_unlock_irq(&qdio->req_q_lock);
768 return retval;
771 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
773 struct scsi_device *sdev = req->data;
774 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
775 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
777 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
778 return;
780 switch (req->qtcb->header.fsf_status) {
781 case FSF_PORT_HANDLE_NOT_VALID:
782 if (fsq->word[0] == fsq->word[1]) {
783 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
784 "fsafch1");
785 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
787 break;
788 case FSF_LUN_HANDLE_NOT_VALID:
789 if (fsq->word[0] == fsq->word[1]) {
790 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
791 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
793 break;
794 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
795 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
796 break;
797 case FSF_PORT_BOXED:
798 zfcp_erp_set_port_status(zfcp_sdev->port,
799 ZFCP_STATUS_COMMON_ACCESS_BOXED);
800 zfcp_erp_port_reopen(zfcp_sdev->port,
801 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
802 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
803 break;
804 case FSF_LUN_BOXED:
805 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
806 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
807 "fsafch4");
808 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
809 break;
810 case FSF_ADAPTER_STATUS_AVAILABLE:
811 switch (fsq->word[0]) {
812 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
813 zfcp_fc_test_link(zfcp_sdev->port);
814 /* fall through */
815 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
816 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
817 break;
819 break;
820 case FSF_GOOD:
821 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
822 break;
826 /**
827 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
828 * @scmnd: The SCSI command to abort
829 * Returns: pointer to struct zfcp_fsf_req
830 */
832 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
834 struct zfcp_fsf_req *req = NULL;
835 struct scsi_device *sdev = scmnd->device;
836 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
837 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
838 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
840 spin_lock_irq(&qdio->req_q_lock);
841 if (zfcp_qdio_sbal_get(qdio))
842 goto out;
843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
844 SBAL_SFLAGS0_TYPE_READ,
845 qdio->adapter->pool.scsi_abort);
846 if (IS_ERR(req)) {
847 req = NULL;
848 goto out;
851 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
852 ZFCP_STATUS_COMMON_UNBLOCKED)))
853 goto out_error_free;
855 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
857 req->data = sdev;
858 req->handler = zfcp_fsf_abort_fcp_command_handler;
859 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
860 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
861 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
863 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
864 if (!zfcp_fsf_req_send(req))
865 goto out;
867 out_error_free:
868 zfcp_fsf_req_free(req);
869 req = NULL;
870 out:
871 spin_unlock_irq(&qdio->req_q_lock);
872 return req;
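/* completion handler for CT/GS requests; reports the result via ct->status */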
875 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
877 struct zfcp_adapter *adapter = req->adapter;
878 struct zfcp_fsf_ct_els *ct = req->data;
879 struct fsf_qtcb_header *header = &req->qtcb->header;
881 ct->status = -EINVAL;
883 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
884 goto skip_fsfstatus;
886 switch (header->fsf_status) {
887 case FSF_GOOD:
888 zfcp_dbf_san_res("fsscth1", req);
889 ct->status = 0;
890 break;
891 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
892 zfcp_fsf_class_not_supp(req);
893 break;
894 case FSF_ADAPTER_STATUS_AVAILABLE:
895 switch (header->fsf_status_qual.word[0]){
896 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
897 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
898 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
899 break;
901 break;
902 case FSF_ACCESS_DENIED:
903 break;
904 case FSF_PORT_BOXED:
905 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
906 break;
907 case FSF_PORT_HANDLE_NOT_VALID:
908 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
909 /* fall through */
910 case FSF_GENERIC_COMMAND_REJECTED:
911 case FSF_PAYLOAD_SIZE_MISMATCH:
912 case FSF_REQUEST_SIZE_TOO_LARGE:
913 case FSF_RESPONSE_SIZE_TOO_LARGE:
914 case FSF_SBAL_MISMATCH:
915 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
916 break;
919 skip_fsfstatus:
920 if (ct->handler)
921 ct->handler(ct->handler_data);
924 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
925 struct zfcp_qdio_req *q_req,
926 struct scatterlist *sg_req,
927 struct scatterlist *sg_resp)
929 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
930 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
931 zfcp_qdio_set_sbale_last(qdio, q_req);
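/* map the CT/ELS request and response scatterlists onto SBALEs, using data
 * division in multi-buffer mode or chained SBALs where needed
 */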
934 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
935 struct scatterlist *sg_req,
936 struct scatterlist *sg_resp)
938 struct zfcp_adapter *adapter = req->adapter;
939 struct zfcp_qdio *qdio = adapter->qdio;
940 struct fsf_qtcb *qtcb = req->qtcb;
941 u32 feat = adapter->adapter_features;
943 if (zfcp_adapter_multi_buffer_active(adapter)) {
944 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
945 return -EIO;
946 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
947 return -EIO;
949 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
950 zfcp_qdio_sbale_count(sg_req));
951 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
952 zfcp_qdio_set_scount(qdio, &req->qdio_req);
953 return 0;
956 /* use single, unchained SBAL if it can hold the request */
957 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
958 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
959 sg_req, sg_resp);
960 return 0;
963 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
964 return -EOPNOTSUPP;
966 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
967 return -EIO;
969 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
971 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
972 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
974 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
975 return -EIO;
977 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
979 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
981 return 0;
984 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
985 struct scatterlist *sg_req,
986 struct scatterlist *sg_resp,
987 unsigned int timeout)
989 int ret;
991 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
992 if (ret)
993 return ret;
995 /* common settings for ct/gs and els requests */
996 if (timeout > 255)
997 timeout = 255; /* max value accepted by hardware */
998 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
999 req->qtcb->bottom.support.timeout = timeout;
1000 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1002 return 0;
1005 /**
1006 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1007 * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1008 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1009 */
1010 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1011 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1012 unsigned int timeout)
1014 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1015 struct zfcp_fsf_req *req;
1016 int ret = -EIO;
1018 spin_lock_irq(&qdio->req_q_lock);
1019 if (zfcp_qdio_sbal_get(qdio))
1020 goto out;
1022 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1023 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1025 if (IS_ERR(req)) {
1026 ret = PTR_ERR(req);
1027 goto out;
1030 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1031 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1032 if (ret)
1033 goto failed_send;
1035 req->handler = zfcp_fsf_send_ct_handler;
1036 req->qtcb->header.port_handle = wka_port->handle;
1037 req->data = ct;
1039 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1041 ret = zfcp_fsf_req_send(req);
1042 if (ret)
1043 goto failed_send;
1045 goto out;
1047 failed_send:
1048 zfcp_fsf_req_free(req);
1049 out:
1050 spin_unlock_irq(&qdio->req_q_lock);
1051 return ret;
1054 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1056 struct zfcp_fsf_ct_els *send_els = req->data;
1057 struct zfcp_port *port = send_els->port;
1058 struct fsf_qtcb_header *header = &req->qtcb->header;
1060 send_els->status = -EINVAL;
1062 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1063 goto skip_fsfstatus;
1065 switch (header->fsf_status) {
1066 case FSF_GOOD:
1067 zfcp_dbf_san_res("fsselh1", req);
1068 send_els->status = 0;
1069 break;
1070 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1071 zfcp_fsf_class_not_supp(req);
1072 break;
1073 case FSF_ADAPTER_STATUS_AVAILABLE:
1074 switch (header->fsf_status_qual.word[0]){
1075 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1076 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1077 case FSF_SQ_RETRY_IF_POSSIBLE:
1078 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1079 break;
1081 break;
1082 case FSF_ELS_COMMAND_REJECTED:
1083 case FSF_PAYLOAD_SIZE_MISMATCH:
1084 case FSF_REQUEST_SIZE_TOO_LARGE:
1085 case FSF_RESPONSE_SIZE_TOO_LARGE:
1086 break;
1087 case FSF_ACCESS_DENIED:
1088 if (port) {
1089 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1090 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1092 break;
1093 case FSF_SBAL_MISMATCH:
1094 /* should never occur, avoided in zfcp_fsf_send_els */
1095 /* fall through */
1096 default:
1097 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1098 break;
1100 skip_fsfstatus:
1101 if (send_els->handler)
1102 send_els->handler(send_els->handler_data);
1105 /**
1106 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1107 * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1108 */
1109 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1110 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1112 struct zfcp_fsf_req *req;
1113 struct zfcp_qdio *qdio = adapter->qdio;
1114 int ret = -EIO;
1116 spin_lock_irq(&qdio->req_q_lock);
1117 if (zfcp_qdio_sbal_get(qdio))
1118 goto out;
1120 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1121 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1123 if (IS_ERR(req)) {
1124 ret = PTR_ERR(req);
1125 goto out;
1128 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1130 if (!zfcp_adapter_multi_buffer_active(adapter))
1131 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1133 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1135 if (ret)
1136 goto failed_send;
1138 hton24(req->qtcb->bottom.support.d_id, d_id);
1139 req->handler = zfcp_fsf_send_els_handler;
1140 req->data = els;
1142 zfcp_dbf_san_req("fssels1", req, d_id);
1144 ret = zfcp_fsf_req_send(req);
1145 if (ret)
1146 goto failed_send;
1148 goto out;
1150 failed_send:
1151 zfcp_fsf_req_free(req);
1152 out:
1153 spin_unlock_irq(&qdio->req_q_lock);
1154 return ret;
1157 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1159 struct zfcp_fsf_req *req;
1160 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1161 int retval = -EIO;
1163 spin_lock_irq(&qdio->req_q_lock);
1164 if (zfcp_qdio_sbal_get(qdio))
1165 goto out;
1167 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1168 SBAL_SFLAGS0_TYPE_READ,
1169 qdio->adapter->pool.erp_req);
1171 if (IS_ERR(req)) {
1172 retval = PTR_ERR(req);
1173 goto out;
1176 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1177 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1179 req->qtcb->bottom.config.feature_selection =
1180 FSF_FEATURE_CFDC |
1181 FSF_FEATURE_LUN_SHARING |
1182 FSF_FEATURE_NOTIFICATION_LOST |
1183 FSF_FEATURE_UPDATE_ALERT;
1184 req->erp_action = erp_action;
1185 req->handler = zfcp_fsf_exchange_config_data_handler;
1186 erp_action->fsf_req_id = req->req_id;
1188 zfcp_fsf_start_erp_timer(req);
1189 retval = zfcp_fsf_req_send(req);
1190 if (retval) {
1191 zfcp_fsf_req_free(req);
1192 erp_action->fsf_req_id = 0;
1194 out:
1195 spin_unlock_irq(&qdio->req_q_lock);
1196 return retval;
1199 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1200 struct fsf_qtcb_bottom_config *data)
1202 struct zfcp_fsf_req *req = NULL;
1203 int retval = -EIO;
1205 spin_lock_irq(&qdio->req_q_lock);
1206 if (zfcp_qdio_sbal_get(qdio))
1207 goto out_unlock;
1209 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1210 SBAL_SFLAGS0_TYPE_READ, NULL);
1212 if (IS_ERR(req)) {
1213 retval = PTR_ERR(req);
1214 goto out_unlock;
1217 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1218 req->handler = zfcp_fsf_exchange_config_data_handler;
1220 req->qtcb->bottom.config.feature_selection =
1221 FSF_FEATURE_CFDC |
1222 FSF_FEATURE_LUN_SHARING |
1223 FSF_FEATURE_NOTIFICATION_LOST |
1224 FSF_FEATURE_UPDATE_ALERT;
1226 if (data)
1227 req->data = data;
1229 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1230 retval = zfcp_fsf_req_send(req);
1231 spin_unlock_irq(&qdio->req_q_lock);
1232 if (!retval)
1233 wait_for_completion(&req->completion);
1235 zfcp_fsf_req_free(req);
1236 return retval;
1238 out_unlock:
1239 spin_unlock_irq(&qdio->req_q_lock);
1240 return retval;
1243 /**
1244 * zfcp_fsf_exchange_port_data - request information about local port
1245 * @erp_action: ERP action for the adapter for which port data is requested
1246 * Returns: 0 on success, error otherwise
1247 */
1248 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1250 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1251 struct zfcp_fsf_req *req;
1252 int retval = -EIO;
1254 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1255 return -EOPNOTSUPP;
1257 spin_lock_irq(&qdio->req_q_lock);
1258 if (zfcp_qdio_sbal_get(qdio))
1259 goto out;
1261 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1262 SBAL_SFLAGS0_TYPE_READ,
1263 qdio->adapter->pool.erp_req);
1265 if (IS_ERR(req)) {
1266 retval = PTR_ERR(req);
1267 goto out;
1270 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1271 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1273 req->handler = zfcp_fsf_exchange_port_data_handler;
1274 req->erp_action = erp_action;
1275 erp_action->fsf_req_id = req->req_id;
1277 zfcp_fsf_start_erp_timer(req);
1278 retval = zfcp_fsf_req_send(req);
1279 if (retval) {
1280 zfcp_fsf_req_free(req);
1281 erp_action->fsf_req_id = 0;
1283 out:
1284 spin_unlock_irq(&qdio->req_q_lock);
1285 return retval;
1288 /**
1289 * zfcp_fsf_exchange_port_data_sync - request information about local port
1290 * @qdio: pointer to struct zfcp_qdio
1291 * @data: pointer to struct fsf_qtcb_bottom_port
1292 * Returns: 0 on success, error otherwise
1293 */
1294 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1295 struct fsf_qtcb_bottom_port *data)
1297 struct zfcp_fsf_req *req = NULL;
1298 int retval = -EIO;
1300 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1301 return -EOPNOTSUPP;
1303 spin_lock_irq(&qdio->req_q_lock);
1304 if (zfcp_qdio_sbal_get(qdio))
1305 goto out_unlock;
1307 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1308 SBAL_SFLAGS0_TYPE_READ, NULL);
1310 if (IS_ERR(req)) {
1311 retval = PTR_ERR(req);
1312 goto out_unlock;
1315 if (data)
1316 req->data = data;
1318 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1320 req->handler = zfcp_fsf_exchange_port_data_handler;
1321 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1322 retval = zfcp_fsf_req_send(req);
1323 spin_unlock_irq(&qdio->req_q_lock);
1325 if (!retval)
1326 wait_for_completion(&req->completion);
1328 zfcp_fsf_req_free(req);
1330 return retval;
1332 out_unlock:
1333 spin_unlock_irq(&qdio->req_q_lock);
1334 return retval;
1337 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1339 struct zfcp_port *port = req->data;
1340 struct fsf_qtcb_header *header = &req->qtcb->header;
1341 struct fc_els_flogi *plogi;
1343 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1344 goto out;
1346 switch (header->fsf_status) {
1347 case FSF_PORT_ALREADY_OPEN:
1348 break;
1349 case FSF_ACCESS_DENIED:
1350 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1351 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1352 break;
1353 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1354 dev_warn(&req->adapter->ccw_device->dev,
1355 "Not enough FCP adapter resources to open "
1356 "remote port 0x%016Lx\n",
1357 (unsigned long long)port->wwpn);
1358 zfcp_erp_set_port_status(port,
1359 ZFCP_STATUS_COMMON_ERP_FAILED);
1360 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1361 break;
1362 case FSF_ADAPTER_STATUS_AVAILABLE:
1363 switch (header->fsf_status_qual.word[0]) {
1364 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1365 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1366 case FSF_SQ_NO_RETRY_POSSIBLE:
1367 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1368 break;
1370 break;
1371 case FSF_GOOD:
1372 port->handle = header->port_handle;
1373 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1374 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1375 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1376 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1377 &port->status);
1378 /* check whether D_ID has changed during open */
1379 /*
1380 * FIXME: This check is not airtight, as the FCP channel does
1381 * not monitor closures of target port connections caused on
1382 * the remote side. Thus, they might miss out on invalidating
1383 * locally cached WWPNs (and other N_Port parameters) of gone
1384 * target ports. So, our heroic attempt to make things safe
1385 * could be undermined by 'open port' response data tagged with
1386 * obsolete WWPNs. Another reason to monitor potential
1387 * connection closures ourself at least (by interpreting
1388 * incoming ELS' and unsolicited status). It just crosses my
1389 * mind that one should be able to cross-check by means of
1390 * another GID_PN straight after a port has been opened.
1391 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1392 */
1393 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1394 if (req->qtcb->bottom.support.els1_length >=
1395 FSF_PLOGI_MIN_LEN)
1396 zfcp_fc_plogi_evaluate(port, plogi);
1397 break;
1398 case FSF_UNKNOWN_OP_SUBTYPE:
1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1400 break;
1403 out:
1404 put_device(&port->dev);
1407 /**
1408 * zfcp_fsf_open_port - create and send open port request
1409 * @erp_action: pointer to struct zfcp_erp_action
1410 * Returns: 0 on success, error otherwise
1411 */
1412 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1414 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1415 struct zfcp_port *port = erp_action->port;
1416 struct zfcp_fsf_req *req;
1417 int retval = -EIO;
1419 spin_lock_irq(&qdio->req_q_lock);
1420 if (zfcp_qdio_sbal_get(qdio))
1421 goto out;
1423 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1424 SBAL_SFLAGS0_TYPE_READ,
1425 qdio->adapter->pool.erp_req);
1427 if (IS_ERR(req)) {
1428 retval = PTR_ERR(req);
1429 goto out;
1432 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1433 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1435 req->handler = zfcp_fsf_open_port_handler;
1436 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1437 req->data = port;
1438 req->erp_action = erp_action;
1439 erp_action->fsf_req_id = req->req_id;
1440 get_device(&port->dev);
1442 zfcp_fsf_start_erp_timer(req);
1443 retval = zfcp_fsf_req_send(req);
1444 if (retval) {
1445 zfcp_fsf_req_free(req);
1446 erp_action->fsf_req_id = 0;
1447 put_device(&port->dev);
1449 out:
1450 spin_unlock_irq(&qdio->req_q_lock);
1451 return retval;
1454 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1456 struct zfcp_port *port = req->data;
1458 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1459 return;
1461 switch (req->qtcb->header.fsf_status) {
1462 case FSF_PORT_HANDLE_NOT_VALID:
1463 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1464 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1465 break;
1466 case FSF_ADAPTER_STATUS_AVAILABLE:
1467 break;
1468 case FSF_GOOD:
1469 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1470 break;
1474 /**
1475 * zfcp_fsf_close_port - create and send close port request
1476 * @erp_action: pointer to struct zfcp_erp_action
1477 * Returns: 0 on success, error otherwise
1478 */
1479 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1481 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1482 struct zfcp_fsf_req *req;
1483 int retval = -EIO;
1485 spin_lock_irq(&qdio->req_q_lock);
1486 if (zfcp_qdio_sbal_get(qdio))
1487 goto out;
1489 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1490 SBAL_SFLAGS0_TYPE_READ,
1491 qdio->adapter->pool.erp_req);
1493 if (IS_ERR(req)) {
1494 retval = PTR_ERR(req);
1495 goto out;
1498 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1499 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1501 req->handler = zfcp_fsf_close_port_handler;
1502 req->data = erp_action->port;
1503 req->erp_action = erp_action;
1504 req->qtcb->header.port_handle = erp_action->port->handle;
1505 erp_action->fsf_req_id = req->req_id;
1507 zfcp_fsf_start_erp_timer(req);
1508 retval = zfcp_fsf_req_send(req);
1509 if (retval) {
1510 zfcp_fsf_req_free(req);
1511 erp_action->fsf_req_id = 0;
1513 out:
1514 spin_unlock_irq(&qdio->req_q_lock);
1515 return retval;
1518 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1520 struct zfcp_fc_wka_port *wka_port = req->data;
1521 struct fsf_qtcb_header *header = &req->qtcb->header;
1523 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1524 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1525 goto out;
1528 switch (header->fsf_status) {
1529 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1530 dev_warn(&req->adapter->ccw_device->dev,
1531 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1532 /* fall through */
1533 case FSF_ADAPTER_STATUS_AVAILABLE:
1534 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1535 /* fall through */
1536 case FSF_ACCESS_DENIED:
1537 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1538 break;
1539 case FSF_GOOD:
1540 wka_port->handle = header->port_handle;
1541 /* fall through */
1542 case FSF_PORT_ALREADY_OPEN:
1543 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1545 out:
1546 wake_up(&wka_port->completion_wq);
1549 /**
1550 * zfcp_fsf_open_wka_port - create and send open wka-port request
1551 * @wka_port: pointer to struct zfcp_fc_wka_port
1552 * Returns: 0 on success, error otherwise
1553 */
1554 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1556 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1557 struct zfcp_fsf_req *req;
1558 int retval = -EIO;
1560 spin_lock_irq(&qdio->req_q_lock);
1561 if (zfcp_qdio_sbal_get(qdio))
1562 goto out;
1564 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1565 SBAL_SFLAGS0_TYPE_READ,
1566 qdio->adapter->pool.erp_req);
1568 if (IS_ERR(req)) {
1569 retval = PTR_ERR(req);
1570 goto out;
1573 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1574 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1576 req->handler = zfcp_fsf_open_wka_port_handler;
1577 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1578 req->data = wka_port;
1580 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1581 retval = zfcp_fsf_req_send(req);
1582 if (retval)
1583 zfcp_fsf_req_free(req);
1584 out:
1585 spin_unlock_irq(&qdio->req_q_lock);
1586 return retval;
1589 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1591 struct zfcp_fc_wka_port *wka_port = req->data;
1593 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1594 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1595 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1598 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1599 wake_up(&wka_port->completion_wq);
1602 /**
1603 * zfcp_fsf_close_wka_port - create and send close wka port request
1604 * @wka_port: WKA port to close
1605 * Returns: 0 on success, error otherwise
1606 */
1607 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1609 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1610 struct zfcp_fsf_req *req;
1611 int retval = -EIO;
1613 spin_lock_irq(&qdio->req_q_lock);
1614 if (zfcp_qdio_sbal_get(qdio))
1615 goto out;
1617 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1618 SBAL_SFLAGS0_TYPE_READ,
1619 qdio->adapter->pool.erp_req);
1621 if (IS_ERR(req)) {
1622 retval = PTR_ERR(req);
1623 goto out;
1626 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1627 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1629 req->handler = zfcp_fsf_close_wka_port_handler;
1630 req->data = wka_port;
1631 req->qtcb->header.port_handle = wka_port->handle;
1633 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1634 retval = zfcp_fsf_req_send(req);
1635 if (retval)
1636 zfcp_fsf_req_free(req);
1637 out:
1638 spin_unlock_irq(&qdio->req_q_lock);
1639 return retval;
1642 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1644 struct zfcp_port *port = req->data;
1645 struct fsf_qtcb_header *header = &req->qtcb->header;
1646 struct scsi_device *sdev;
1648 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1649 return;
1651 switch (header->fsf_status) {
1652 case FSF_PORT_HANDLE_NOT_VALID:
1653 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1654 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1655 break;
1656 case FSF_ACCESS_DENIED:
1657 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1658 break;
1659 case FSF_PORT_BOXED:
1660 /* can't use generic zfcp_erp_modify_port_status because
1661 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1662 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1663 shost_for_each_device(sdev, port->adapter->scsi_host)
1664 if (sdev_to_zfcp(sdev)->port == port)
1665 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1666 &sdev_to_zfcp(sdev)->status);
1667 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1668 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1669 "fscpph2");
1670 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1671 break;
1672 case FSF_ADAPTER_STATUS_AVAILABLE:
1673 switch (header->fsf_status_qual.word[0]) {
1674 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1675 /* fall through */
1676 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1677 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1678 break;
1680 break;
1681 case FSF_GOOD:
1682 /* can't use generic zfcp_erp_modify_port_status because
1683 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1684 */
1685 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1686 shost_for_each_device(sdev, port->adapter->scsi_host)
1687 if (sdev_to_zfcp(sdev)->port == port)
1688 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1689 &sdev_to_zfcp(sdev)->status);
1690 break;
1694 /**
1695 * zfcp_fsf_close_physical_port - close physical port
1696 * @erp_action: pointer to struct zfcp_erp_action
1697 * Returns: 0 on success
1698 */
1699 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1701 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1702 struct zfcp_fsf_req *req;
1703 int retval = -EIO;
1705 spin_lock_irq(&qdio->req_q_lock);
1706 if (zfcp_qdio_sbal_get(qdio))
1707 goto out;
1709 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1710 SBAL_SFLAGS0_TYPE_READ,
1711 qdio->adapter->pool.erp_req);
1713 if (IS_ERR(req)) {
1714 retval = PTR_ERR(req);
1715 goto out;
1718 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1719 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1721 req->data = erp_action->port;
1722 req->qtcb->header.port_handle = erp_action->port->handle;
1723 req->erp_action = erp_action;
1724 req->handler = zfcp_fsf_close_physical_port_handler;
1725 erp_action->fsf_req_id = req->req_id;
1727 zfcp_fsf_start_erp_timer(req);
1728 retval = zfcp_fsf_req_send(req);
1729 if (retval) {
1730 zfcp_fsf_req_free(req);
1731 erp_action->fsf_req_id = 0;
1733 out:
1734 spin_unlock_irq(&qdio->req_q_lock);
1735 return retval;
1738 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1740 struct zfcp_adapter *adapter = req->adapter;
1741 struct scsi_device *sdev = req->data;
1742 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1743 struct fsf_qtcb_header *header = &req->qtcb->header;
1744 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1746 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1747 return;
1749 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1750 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1751 ZFCP_STATUS_LUN_SHARED |
1752 ZFCP_STATUS_LUN_READONLY,
1753 &zfcp_sdev->status);
1755 switch (header->fsf_status) {
1757 case FSF_PORT_HANDLE_NOT_VALID:
1758 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1759 /* fall through */
1760 case FSF_LUN_ALREADY_OPEN:
1761 break;
1762 case FSF_ACCESS_DENIED:
1763 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1764 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1765 break;
1766 case FSF_PORT_BOXED:
1767 zfcp_erp_set_port_status(zfcp_sdev->port,
1768 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1769 zfcp_erp_port_reopen(zfcp_sdev->port,
1770 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1771 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1772 break;
1773 case FSF_LUN_SHARING_VIOLATION:
1774 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1775 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1776 break;
1777 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1778 dev_warn(&adapter->ccw_device->dev,
1779 "No handle is available for LUN "
1780 "0x%016Lx on port 0x%016Lx\n",
1781 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1782 (unsigned long long)zfcp_sdev->port->wwpn);
1783 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1784 /* fall through */
1785 case FSF_INVALID_COMMAND_OPTION:
1786 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1787 break;
1788 case FSF_ADAPTER_STATUS_AVAILABLE:
1789 switch (header->fsf_status_qual.word[0]) {
1790 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1791 zfcp_fc_test_link(zfcp_sdev->port);
1792 /* fall through */
1793 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1794 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1795 break;
1797 break;
1799 case FSF_GOOD:
1800 zfcp_sdev->lun_handle = header->lun_handle;
1801 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1802 zfcp_cfdc_open_lun_eval(sdev, bottom);
1803 break;
1807 /**
1808 * zfcp_fsf_open_lun - open LUN
1809 * @erp_action: pointer to struct zfcp_erp_action
1810 * Returns: 0 on success, error otherwise
1811 */
1812 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1814 struct zfcp_adapter *adapter = erp_action->adapter;
1815 struct zfcp_qdio *qdio = adapter->qdio;
1816 struct zfcp_fsf_req *req;
1817 int retval = -EIO;
1819 spin_lock_irq(&qdio->req_q_lock);
1820 if (zfcp_qdio_sbal_get(qdio))
1821 goto out;
1823 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1824 SBAL_SFLAGS0_TYPE_READ,
1825 adapter->pool.erp_req);
1827 if (IS_ERR(req)) {
1828 retval = PTR_ERR(req);
1829 goto out;
1832 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1833 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1835 req->qtcb->header.port_handle = erp_action->port->handle;
1836 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1837 req->handler = zfcp_fsf_open_lun_handler;
1838 req->data = erp_action->sdev;
1839 req->erp_action = erp_action;
1840 erp_action->fsf_req_id = req->req_id;
1842 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1843 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1845 zfcp_fsf_start_erp_timer(req);
1846 retval = zfcp_fsf_req_send(req);
1847 if (retval) {
1848 zfcp_fsf_req_free(req);
1849 erp_action->fsf_req_id = 0;
1851 out:
1852 spin_unlock_irq(&qdio->req_q_lock);
1853 return retval;
1856 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1858 struct scsi_device *sdev = req->data;
1859 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1861 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1862 return;
1864 switch (req->qtcb->header.fsf_status) {
1865 case FSF_PORT_HANDLE_NOT_VALID:
1866 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1867 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1868 break;
1869 case FSF_LUN_HANDLE_NOT_VALID:
1870 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1871 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1872 break;
1873 case FSF_PORT_BOXED:
1874 zfcp_erp_set_port_status(zfcp_sdev->port,
1875 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1876 zfcp_erp_port_reopen(zfcp_sdev->port,
1877 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1878 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1879 break;
1880 case FSF_ADAPTER_STATUS_AVAILABLE:
1881 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1882 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1883 zfcp_fc_test_link(zfcp_sdev->port);
1884 /* fall through */
1885 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1886 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1887 break;
1889 break;
1890 case FSF_GOOD:
1891 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1892 break;
1896 /**
1897 * zfcp_fsf_close_lun - close LUN
1898 * @erp_action: pointer to erp_action triggering the "close LUN"
1899 * Returns: 0 on success, error otherwise
1900 */
1901 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1903 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1904 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1905 struct zfcp_fsf_req *req;
1906 int retval = -EIO;
1908 spin_lock_irq(&qdio->req_q_lock);
1909 if (zfcp_qdio_sbal_get(qdio))
1910 goto out;
1912 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1913 SBAL_SFLAGS0_TYPE_READ,
1914 qdio->adapter->pool.erp_req);
1916 if (IS_ERR(req)) {
1917 retval = PTR_ERR(req);
1918 goto out;
1921 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1922 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1924 req->qtcb->header.port_handle = erp_action->port->handle;
1925 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1926 req->handler = zfcp_fsf_close_lun_handler;
1927 req->data = erp_action->sdev;
1928 req->erp_action = erp_action;
1929 erp_action->fsf_req_id = req->req_id;
1931 zfcp_fsf_start_erp_timer(req);
1932 retval = zfcp_fsf_req_send(req);
1933 if (retval) {
1934 zfcp_fsf_req_free(req);
1935 erp_action->fsf_req_id = 0;
1936 }
1937 out:
1938 spin_unlock_irq(&qdio->req_q_lock);
1939 return retval;
1940 }
1942 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1943 {
1944 lat_rec->sum += lat;
1945 lat_rec->min = min(lat_rec->min, lat);
1946 lat_rec->max = max(lat_rec->max, lat);
1947 }
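/*
 * zfcp_fsf_update_lat() keeps a running sum/min/max per latency record;
 * an average can later be derived from the sum and the counter kept in
 * struct latency_cont.  Illustrative sketch only (the helper and the ~0U
 * start value for "min" are assumptions made for the example):
 */
#if 0
static void example_latency_accumulation(void)
{
	struct fsf_latency_record rec = { .min = ~0U };

	zfcp_fsf_update_lat(&rec, 5);
	zfcp_fsf_update_lat(&rec, 2);
	zfcp_fsf_update_lat(&rec, 9);
	/* now rec.sum == 16, rec.min == 2, rec.max == 9 */
}
#endif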
1949 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1950 {
1951 struct fsf_qual_latency_info *lat_in;
1952 struct latency_cont *lat = NULL;
1953 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
1954 struct zfcp_blk_drv_data blktrc;
1955 int ticks = req->adapter->timer_ticks;
1957 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1959 blktrc.flags = 0;
1960 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1961 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1962 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1963 blktrc.inb_usage = 0;
1964 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
1966 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1967 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1968 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1969 blktrc.channel_lat = lat_in->channel_lat * ticks;
1970 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1972 switch (req->qtcb->bottom.io.data_direction) {
1973 case FSF_DATADIR_DIF_READ_STRIP:
1974 case FSF_DATADIR_DIF_READ_CONVERT:
1975 case FSF_DATADIR_READ:
1976 lat = &zfcp_sdev->latencies.read;
1977 break;
1978 case FSF_DATADIR_DIF_WRITE_INSERT:
1979 case FSF_DATADIR_DIF_WRITE_CONVERT:
1980 case FSF_DATADIR_WRITE:
1981 lat = &zfcp_sdev->latencies.write;
1982 break;
1983 case FSF_DATADIR_CMND:
1984 lat = &zfcp_sdev->latencies.cmd;
1985 break;
1986 }
1988 if (lat) {
1989 spin_lock(&zfcp_sdev->latencies.lock);
1990 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
1991 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
1992 lat->counter++;
1993 spin_unlock(&zfcp_sdev->latencies.lock);
1994 }
1995 }
1997 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
1998 sizeof(blktrc));
1999 }
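/*
 * zfcp_fsf_req_trace() attaches a struct zfcp_blk_drv_data record to the
 * blktrace stream via blk_add_driver_data().  Channel and fabric
 * latencies are only valid (ZFCP_BLK_LAT_VALID) when the adapter reports
 * FSF_FEATURE_MEASUREMENT_DATA and the request completed without error;
 * they are given in adapter timer ticks.
 */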
2001 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2002 {
2003 struct scsi_cmnd *scmnd = req->data;
2004 struct scsi_device *sdev = scmnd->device;
2005 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2006 struct fsf_qtcb_header *header = &req->qtcb->header;
2008 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2009 return;
2011 switch (header->fsf_status) {
2012 case FSF_HANDLE_MISMATCH:
2013 case FSF_PORT_HANDLE_NOT_VALID:
2014 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2015 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2016 break;
2017 case FSF_FCPLUN_NOT_VALID:
2018 case FSF_LUN_HANDLE_NOT_VALID:
2019 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2020 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2021 break;
2022 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2023 zfcp_fsf_class_not_supp(req);
2024 break;
2025 case FSF_ACCESS_DENIED:
2026 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2027 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2028 break;
2029 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2030 dev_err(&req->adapter->ccw_device->dev,
2031 "Incorrect direction %d, LUN 0x%016Lx on port "
2032 "0x%016Lx closed\n",
2033 req->qtcb->bottom.io.data_direction,
2034 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2035 (unsigned long long)zfcp_sdev->port->wwpn);
2036 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2037 "fssfch3");
2038 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2039 break;
2040 case FSF_CMND_LENGTH_NOT_VALID:
2041 dev_err(&req->adapter->ccw_device->dev,
2042 "Incorrect CDB length %d, LUN 0x%016Lx on "
2043 "port 0x%016Lx closed\n",
2044 req->qtcb->bottom.io.fcp_cmnd_length,
2045 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2046 (unsigned long long)zfcp_sdev->port->wwpn);
2047 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2048 "fssfch4");
2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2050 break;
2051 case FSF_PORT_BOXED:
2052 zfcp_erp_set_port_status(zfcp_sdev->port,
2053 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2054 zfcp_erp_port_reopen(zfcp_sdev->port,
2055 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2056 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2057 break;
2058 case FSF_LUN_BOXED:
2059 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2060 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2061 "fssfch6");
2062 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2063 break;
2064 case FSF_ADAPTER_STATUS_AVAILABLE:
2065 if (header->fsf_status_qual.word[0] ==
2066 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2067 zfcp_fc_test_link(zfcp_sdev->port);
2068 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2069 break;
2070 }
2071 }
2073 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2074 {
2075 struct scsi_cmnd *scpnt;
2076 struct fcp_resp_with_ext *fcp_rsp;
2077 unsigned long flags;
2079 read_lock_irqsave(&req->adapter->abort_lock, flags);
2081 scpnt = req->data;
2082 if (unlikely(!scpnt)) {
2083 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2084 return;
2085 }
2087 zfcp_fsf_fcp_handler_common(req);
2089 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2090 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2091 goto skip_fsfstatus;
2092 }
2094 switch (req->qtcb->header.fsf_status) {
2095 case FSF_INCONSISTENT_PROT_DATA:
2096 case FSF_INVALID_PROT_PARM:
2097 set_host_byte(scpnt, DID_ERROR);
2098 goto skip_fsfstatus;
2099 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2100 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2101 goto skip_fsfstatus;
2102 case FSF_APP_TAG_CHECK_FAILURE:
2103 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2104 goto skip_fsfstatus;
2105 case FSF_REF_TAG_CHECK_FAILURE:
2106 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2107 goto skip_fsfstatus;
2108 }
2109 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2110 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2112 skip_fsfstatus:
2113 zfcp_fsf_req_trace(req, scpnt);
2114 zfcp_dbf_scsi_result(scpnt, req);
2116 scpnt->host_scribble = NULL;
2117 (scpnt->scsi_done) (scpnt);
2118 /*
2119 * We must hold this lock until scsi_done has been called.
2120 * Otherwise we may call scsi_done after abort regarding this
2121 * command has completed.
2122 * Note: scsi_done must not block!
2123 */
2124 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2125 }
2127 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2128 {
2129 switch (scsi_get_prot_op(scsi_cmnd)) {
2130 case SCSI_PROT_NORMAL:
2131 switch (scsi_cmnd->sc_data_direction) {
2132 case DMA_NONE:
2133 *data_dir = FSF_DATADIR_CMND;
2134 break;
2135 case DMA_FROM_DEVICE:
2136 *data_dir = FSF_DATADIR_READ;
2137 break;
2138 case DMA_TO_DEVICE:
2139 *data_dir = FSF_DATADIR_WRITE;
2140 break;
2141 case DMA_BIDIRECTIONAL:
2142 return -EINVAL;
2143 }
2144 break;
2146 case SCSI_PROT_READ_STRIP:
2147 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2148 break;
2149 case SCSI_PROT_WRITE_INSERT:
2150 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2151 break;
2152 case SCSI_PROT_READ_PASS:
2153 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2154 break;
2155 case SCSI_PROT_WRITE_PASS:
2156 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2157 break;
2158 default:
2159 return -EINVAL;
2160 }
2162 return 0;
2163 }
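/*
 * Summary of the mapping implemented by zfcp_fsf_set_data_dir():
 *
 *   SCSI_PROT_NORMAL + DMA_NONE             -> FSF_DATADIR_CMND
 *   SCSI_PROT_NORMAL + DMA_FROM_DEVICE      -> FSF_DATADIR_READ
 *   SCSI_PROT_NORMAL + DMA_TO_DEVICE        -> FSF_DATADIR_WRITE
 *   SCSI_PROT_READ_STRIP                    -> FSF_DATADIR_DIF_READ_STRIP
 *   SCSI_PROT_WRITE_INSERT                  -> FSF_DATADIR_DIF_WRITE_INSERT
 *   SCSI_PROT_READ_PASS                     -> FSF_DATADIR_DIF_READ_CONVERT
 *   SCSI_PROT_WRITE_PASS                    -> FSF_DATADIR_DIF_WRITE_CONVERT
 *   anything else (incl. DMA_BIDIRECTIONAL) -> -EINVAL
 */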
2165 /**
2166 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2167 * @scsi_cmnd: scsi command to be sent
2168 */
2169 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2170 {
2171 struct zfcp_fsf_req *req;
2172 struct fcp_cmnd *fcp_cmnd;
2173 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2174 int retval = -EIO;
2175 struct scsi_device *sdev = scsi_cmnd->device;
2176 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2177 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2178 struct zfcp_qdio *qdio = adapter->qdio;
2179 struct fsf_qtcb_bottom_io *io;
2180 unsigned long flags;
2182 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2183 ZFCP_STATUS_COMMON_UNBLOCKED)))
2184 return -EBUSY;
2186 spin_lock_irqsave(&qdio->req_q_lock, flags);
2187 if (atomic_read(&qdio->req_q_free) <= 0) {
2188 atomic_inc(&qdio->req_q_full);
2189 goto out;
2190 }
2192 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2193 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2195 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2196 sbtype, adapter->pool.scsi_req);
2198 if (IS_ERR(req)) {
2199 retval = PTR_ERR(req);
2200 goto out;
2201 }
2203 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2205 io = &req->qtcb->bottom.io;
2206 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2207 req->data = scsi_cmnd;
2208 req->handler = zfcp_fsf_fcp_cmnd_handler;
2209 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2210 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2211 io->service_class = FSF_CLASS_3;
2212 io->fcp_cmnd_length = FCP_CMND_LEN;
2214 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2215 io->data_block_length = scsi_cmnd->device->sector_size;
2216 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2217 }
2219 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2220 goto failed_scsi_cmnd;
2222 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2223 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2225 if (scsi_prot_sg_count(scsi_cmnd)) {
2226 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2227 scsi_prot_sg_count(scsi_cmnd));
2228 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2229 scsi_prot_sglist(scsi_cmnd));
2230 if (retval)
2231 goto failed_scsi_cmnd;
2232 io->prot_data_length = zfcp_qdio_real_bytes(
2233 scsi_prot_sglist(scsi_cmnd));
2234 }
2236 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2237 scsi_sglist(scsi_cmnd));
2238 if (unlikely(retval))
2239 goto failed_scsi_cmnd;
2241 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2242 if (zfcp_adapter_multi_buffer_active(adapter))
2243 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2245 retval = zfcp_fsf_req_send(req);
2246 if (unlikely(retval))
2247 goto failed_scsi_cmnd;
2249 goto out;
2251 failed_scsi_cmnd:
2252 zfcp_fsf_req_free(req);
2253 scsi_cmnd->host_scribble = NULL;
2254 out:
2255 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2256 return retval;
2257 }
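/*
 * Illustrative sketch of how a queuecommand-style caller might react to
 * the return codes of zfcp_fsf_fcp_cmnd(); the function name and exact
 * mapping below are assumptions for illustration (see zfcp_scsi.c for
 * the real caller):
 */
#if 0
static int example_queuecommand(struct scsi_cmnd *scmnd)
{
	int ret = zfcp_fsf_fcp_cmnd(scmnd);

	if (ret == -EBUSY)	/* LUN not unblocked yet */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	if (ret < 0)		/* no SBAL or request available right now */
		return SCSI_MLQUEUE_HOST_BUSY;
	return 0;
}
#endif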
2259 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2260 {
2261 struct fcp_resp_with_ext *fcp_rsp;
2262 struct fcp_resp_rsp_info *rsp_info;
2264 zfcp_fsf_fcp_handler_common(req);
2266 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2267 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2269 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2270 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2271 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2272 }
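/*
 * For task management functions the FCP response carries a struct
 * fcp_resp_rsp_info directly behind the fcp_resp_with_ext header; any
 * response code other than FCP_TMF_CMPL, or a request-level error, marks
 * the request ZFCP_STATUS_FSFREQ_TMFUNCFAILED for the waiting caller.
 */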
2274 /**
2275 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2276 * @scmnd: SCSI command to send the task management command for
2277 * @tm_flags: unsigned byte for task management flags
2278 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2279 */
2280 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2281 u8 tm_flags)
2282 {
2283 struct zfcp_fsf_req *req = NULL;
2284 struct fcp_cmnd *fcp_cmnd;
2285 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2286 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2288 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2289 ZFCP_STATUS_COMMON_UNBLOCKED)))
2290 return NULL;
2292 spin_lock_irq(&qdio->req_q_lock);
2293 if (zfcp_qdio_sbal_get(qdio))
2294 goto out;
2296 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2297 SBAL_SFLAGS0_TYPE_WRITE,
2298 qdio->adapter->pool.scsi_req);
2300 if (IS_ERR(req)) {
2301 req = NULL;
2302 goto out;
2303 }
2305 req->data = scmnd;
2306 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2307 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2308 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2309 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2310 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2311 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2313 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2315 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2316 zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2318 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2319 if (!zfcp_fsf_req_send(req))
2320 goto out;
2322 zfcp_fsf_req_free(req);
2323 req = NULL;
2324 out:
2325 spin_unlock_irq(&qdio->req_q_lock);
2326 return req;
2327 }
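/*
 * Illustrative sketch of how an error-handling caller could use
 * zfcp_fsf_fcp_task_mgmt(); the helper below and the choice of
 * FCP_TMF_LUN_RESET are assumptions for illustration (the real callers
 * live in zfcp_scsi.c):
 */
#if 0
static int example_lun_reset(struct scsi_cmnd *scpnt)
{
	struct zfcp_fsf_req *req;
	int ret = SUCCESS;

	req = zfcp_fsf_fcp_task_mgmt(scpnt, FCP_TMF_LUN_RESET);
	if (!req)
		return FAILED;

	wait_for_completion(&req->completion);
	if (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
		ret = FAILED;
	zfcp_fsf_req_free(req);
	return ret;
}
#endif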
2329 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2330 {
2331 }
2333 /**
2334 * zfcp_fsf_control_file - control file upload/download
2335 * @adapter: pointer to struct zfcp_adapter
2336 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2337 * Returns: pointer to the completed struct zfcp_fsf_req on success, ERR_PTR otherwise
2338 */
2339 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2340 struct zfcp_fsf_cfdc *fsf_cfdc)
2341 {
2342 struct zfcp_qdio *qdio = adapter->qdio;
2343 struct zfcp_fsf_req *req = NULL;
2344 struct fsf_qtcb_bottom_support *bottom;
2345 int retval = -EIO;
2346 u8 direction;
2348 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2349 return ERR_PTR(-EOPNOTSUPP);
2351 switch (fsf_cfdc->command) {
2352 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2353 direction = SBAL_SFLAGS0_TYPE_WRITE;
2354 break;
2355 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2356 direction = SBAL_SFLAGS0_TYPE_READ;
2357 break;
2358 default:
2359 return ERR_PTR(-EINVAL);
2360 }
2362 spin_lock_irq(&qdio->req_q_lock);
2363 if (zfcp_qdio_sbal_get(qdio))
2364 goto out;
2366 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
2367 if (IS_ERR(req)) {
2368 retval = -EPERM;
2369 goto out;
2370 }
2372 req->handler = zfcp_fsf_control_file_handler;
2374 bottom = &req->qtcb->bottom.support;
2375 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2376 bottom->option = fsf_cfdc->option;
2378 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2380 if (retval ||
2381 (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
2382 zfcp_fsf_req_free(req);
2383 retval = -EIO;
2384 goto out;
2385 }
2386 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2387 if (zfcp_adapter_multi_buffer_active(adapter))
2388 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2390 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2391 retval = zfcp_fsf_req_send(req);
2392 out:
2393 spin_unlock_irq(&qdio->req_q_lock);
2395 if (!retval) {
2396 wait_for_completion(&req->completion);
2397 return req;
2398 }
2399 return ERR_PTR(retval);
2400 }
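/*
 * Unlike the other request constructors in this file,
 * zfcp_fsf_control_file() is synchronous: it waits for the completion
 * and returns the finished request (or an ERR_PTR).  Illustrative
 * sketch of a hypothetical caller:
 */
#if 0
static int example_cfdc_exchange(struct zfcp_adapter *adapter,
				 struct zfcp_fsf_cfdc *cfdc)
{
	struct zfcp_fsf_req *req = zfcp_fsf_control_file(adapter, cfdc);

	if (IS_ERR(req))
		return PTR_ERR(req);
	/* evaluate req->qtcb->header.fsf_status here, then release it */
	zfcp_fsf_req_free(req);
	return 0;
}
#endif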
2402 /**
2403 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2404 * @qdio: pointer to struct zfcp_qdio
2405 * @sbal_idx: response queue index of SBAL to be processed
2406 */
2407 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2408 {
2409 struct zfcp_adapter *adapter = qdio->adapter;
2410 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2411 struct qdio_buffer_element *sbale;
2412 struct zfcp_fsf_req *fsf_req;
2413 unsigned long req_id;
2414 int idx;
2416 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2418 sbale = &sbal->element[idx];
2419 req_id = (unsigned long) sbale->addr;
2420 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2422 if (!fsf_req) {
2423 /*
2424 * Unknown request means that we have potentially memory
2425 * corruption and must stop the machine immediately.
2426 */
2427 zfcp_qdio_siosl(adapter);
2428 panic("error: unknown req_id (%lx) on adapter %s.\n",
2429 req_id, dev_name(&adapter->ccw_device->dev));
2430 }
2432 fsf_req->qdio_req.sbal_response = sbal_idx;
2433 zfcp_fsf_req_complete(fsf_req);
2435 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2436 break;
2437 }
2438 }
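/*
 * Each SBALE of a response SBAL carries the originating request ID in
 * its addr field, so the response path can look up (and remove) the
 * zfcp_fsf_req in the adapter's request list purely from that ID; an
 * unknown ID is treated as possible memory corruption and is fatal.
 */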
2440 struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
2441 struct qdio_buffer *sbal)
2442 {
2443 struct qdio_buffer_element *sbale = &sbal->element[0];
2444 u64 req_id = (unsigned long) sbale->addr;
2446 return zfcp_reqlist_find(qdio->adapter->req_list, req_id);