/*
 * Fibre Channel related functions for the zfcp device driver.
 *
 * Copyright IBM Corporation 2008, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/scsi_transport_fc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"

struct kmem_cache *zfcp_fc_req_cache;

static u32 zfcp_fc_rscn_range_mask[] = {
	[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
	[ELS_ADDR_FMT_AREA] = 0xFFFF00,
	[ELS_ADDR_FMT_DOM]  = 0xFF0000,
	[ELS_ADDR_FMT_FAB]  = 0x000000,
};
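
/*
 * Example: an RSCN page with address format ELS_ADDR_FMT_AREA and FID
 * 0x012345 selects the mask 0xFFFF00, so _zfcp_fc_incoming_rscn()
 * below tests every known port with
 *	(port->d_id & 0xFFFF00) == (0x012345 & 0xFFFF00),
 * i.e. all ports within domain/area 0x0123xx.
 */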

/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						    events);

	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				   event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}
}

/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *		scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			   enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}
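
/*
 * Usage sketch (illustrative only, assuming a caller that already owns
 * a valid adapter reference in interrupt context):
 *
 *	zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
 *
 * zfcp_fc_post_event() later dequeues the entry in process context and
 * forwards it to the fc_transport class via fc_host_post_event().
 */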

static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}

static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}

static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) != 0)
		return;
	/* wait 10 milliseconds, other reqs might pop in */
	schedule_delayed_work(&wka_port->work, HZ / 100);
}
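
/*
 * Typical well-known-address port usage, as in zfcp_fc_ns_gid_pn()
 * below: take a reference before sending a request to the directory
 * service and drop it afterwards, which arms the delayed offline work:
 *
 *	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
 *	if (ret)
 *		goto out;
 *	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
 *	zfcp_fc_wka_port_put(&adapter->gs->ds);
 */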

static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	init_waitqueue_head(&wka_port->completion_wq);

	wka_port->adapter = adapter;
	wka_port->d_id = d_id;

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);
	mutex_init(&wka_port->mutex);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}

static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}

void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;

	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}

static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1");
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;

	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
}

static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer;
	struct fc_els_flogi *plogi;

	status_buffer = (struct fsf_status_read_buffer *) req->data;
	plogi = (struct fc_els_flogi *) status_buffer->payload.data;
	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}

static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *)req->data;
	struct fc_els_logo *logo =
		(struct fc_els_logo *) status_buffer->payload.data;

	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}

/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_in_els("fciels1", fsf_req);
	if (els_type == ELS_PLOGI)
		zfcp_fc_incoming_plogi(fsf_req);
	else if (els_type == ELS_LOGO)
		zfcp_fc_incoming_logo(fsf_req);
	else if (els_type == ELS_RSCN)
		zfcp_fc_incoming_rscn(fsf_req);
}

static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;

	if (ct_els->status)
		return;
	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
		return;

	/* looks like a valid d_id */
	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}

static void zfcp_fc_complete(void *data)
{
	complete(data);
}

static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
	ct_hdr->ct_rev = FC_CT_REV;
	ct_hdr->ct_fs_type = FC_FST_DIR;
	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
	ct_hdr->ct_cmd = cmd;
	ct_hdr->ct_mr_size = mr_size / 4;
}

static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_req *fc_req)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
	int ret;

	/* setup parameters for send generic command */
	fc_req->ct_els.port = port;
	fc_req->ct_els.handler = zfcp_fc_complete;
	fc_req->ct_els.handler_data = &completion;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(fc_req);
	}
	return ret;
}

/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM on error, 0 otherwise
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;

	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	memset(fc_req, 0, sizeof(*fc_req));

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(fc_req, adapter->pool.gid_pn);
	return ret;
}

void zfcp_fc_port_did_lookup(struct work_struct *work)
{
	int ret;
	struct zfcp_port *port = container_of(work, struct zfcp_port,
					      gid_pn_work);

	ret = zfcp_fc_ns_gid_pn(port);
	if (ret) {
		/* could not issue gid_pn for some reason */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
		goto out;
	}

	if (!port->d_id) {
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
		goto out;
	}

	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->dev);
}

/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
	if (plogi->fl_wwpn != port->wwpn) {
		port->d_id = 0;
		dev_warn(&port->adapter->ccw_device->dev,
			 "A port opened with WWPN 0x%016Lx returned data that "
			 "identifies it as WWPN 0x%016Lx\n",
			 (unsigned long long) port->wwpn,
			 (unsigned long long) plogi->fl_wwpn);
		return;
	}

	port->wwnn = plogi->fl_wwnn;
	port->maxframe_size = plogi->fl_csp.sp_bb_data;

	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS1;
	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS2;
	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS3;
	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS4;
}

static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_req *fc_req = data;
	struct zfcp_port *port = fc_req->ct_els.port;
	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

	if (fc_req->ct_els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1");
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = adisc_resp->adisc_wwnn;

	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2");
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static int zfcp_fc_adisc(struct zfcp_port *port)
{
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	int ret;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	fc_req->ct_els.port = port;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
		    sizeof(struct fc_els_adisc));
	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
		    sizeof(struct fc_els_adisc));

	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
	fc_req->ct_els.handler_data = fc_req;

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
				ZFCP_FC_CTELS_TMO);
	if (ret)
		kmem_cache_free(zfcp_fc_req_cache, fc_req);

	return ret;
}

void zfcp_fc_link_test_work(struct work_struct *work)
{
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at one time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;

	/* send of ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

out:
	put_device(&port->dev);
}

/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be carried out.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
		put_device(&port->dev);
}

static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_req *fc_req;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return NULL;

	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
		kmem_cache_free(zfcp_fc_req_cache, fc_req);
		return NULL;
	}

	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
		    sizeof(struct zfcp_fc_gpn_ft_req));

	return fc_req;
}

static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
	return ret;
}

static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
		return;

	list_move_tail(&port->list, lh);
}

static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_entries)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct scatterlist *sg = &fc_req->sg_rsp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct_els->status)
		return -EIO;

	if (hdr->ct_cmd != FC_FS_ACC) {
		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1");
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
	}

	return ret;
}
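
/*
 * Note on the entry walk in zfcp_fc_eval_gpn_ft(): acc initially
 * aliases the CT header in the first response buffer; the
 * x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1) test advances acc within the
 * current buffer and switches sg_virt() to the next scatterlist
 * element once the buffer is exhausted.
 */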

/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_req *fc_req;
	int chain, max_entries, buf_num, max_bytes;

	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	fc_req = zfcp_alloc_sg_env(buf_num);
	if (!fc_req)
		goto out;

	for (i = 0; i < 3; i++) {
		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}

static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
			struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	char devno[] = "DEVNO:";
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
	int ret;

	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));

	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (ret)
		return ret;

	wait_for_completion(&completion);
	if (ct_els->status)
		return ct_els->status;

	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
		snprintf(fc_host_symbolic_name(adapter->scsi_host),
			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
			 gspn_rsp->gspn.fp_name, devno,
			 dev_name(&adapter->ccw_device->dev),
			 init_utsname()->nodename);
	else
		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);

	return 0;
}

static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
			 struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct Scsi_Host *shost = adapter->scsi_host;
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
	int ret, len;

	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
		      FC_SYMBOLIC_NAME_SIZE);
	rspn_req->rspn.fr_name_len = len;

	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
}

/**
 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
 * @work: ns_up_work of the adapter where to update the symbolic port name
 *
 * Retrieve the current symbolic port name that may have been set by
 * the hardware using the GSPN request and update the fc_host
 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
 * the port name is unique for this system), update the symbolic port
 * name to add Linux specific information and update the FC nameserver
 * using the RSPN request.
 */
void zfcp_fc_sym_name_update(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    ns_up_work);
	int ret;
	struct zfcp_fc_req *fc_req;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return;

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out_free;

	ret = zfcp_fc_gspn(adapter, fc_req);
	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		goto out_ds_put;

	memset(fc_req, 0, sizeof(*fc_req));
	zfcp_fc_rspn(adapter, fc_req);

out_ds_put:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
	struct fc_bsg_reply *jr = job->reply;

	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	jr->result = zfcp_ct_els->status ? -EIO : 0;
	job->job_done(job);
}

static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}

static void zfcp_fc_ct_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	zfcp_fc_wka_port_put(wka_port);

	zfcp_fc_ct_els_job_handler(data);
}

static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
				struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *els = job->dd_data;
	struct fc_rport *rport = job->rport;
	struct zfcp_port *port;
	u32 d_id;

	if (rport) {
		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
		if (!port)
			return -EINVAL;

		d_id = port->d_id;
		put_device(&port->dev);
	} else
		d_id = ntoh24(job->request->rqst_data.h_els.port_id);

	els->handler = zfcp_fc_ct_els_job_handler;
	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}

static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	int ret;
	struct zfcp_fsf_ct_els *ct = job->dd_data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);

	return ret;
}

int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct zfcp_adapter *adapter;
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;

	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
	adapter = (struct zfcp_adapter *)shost->hostdata[0];

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL;
	}
}

int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EIO;
}

int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *wka_ports;

	wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
	if (!wka_ports)
		return -ENOMEM;

	adapter->gs = wka_ports;
	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);

	return 0;
}
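
/*
 * Lifecycle sketch (assumed caller context, not shown in this file):
 * adapter bring-up allocates the well-known-address ports once via
 * zfcp_fc_gs_setup(), shutdown paths call
 * zfcp_fc_wka_ports_force_offline() before the adapter goes away, and
 * zfcp_fc_gs_destroy() below frees the allocation.
 */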

void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL;
}