Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / io / fibre-channel / ulp / fcp.c
blob230c2cafd25210b561d183014897cd354e60b36d
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Fibre Channel SCSI ULP Mapping driver
27 #include <sys/scsi/scsi.h>
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/devctl.h>
31 #include <sys/thread.h>
32 #include <sys/thread.h>
33 #include <sys/open.h>
34 #include <sys/file.h>
35 #include <sys/sunndi.h>
36 #include <sys/console.h>
37 #include <sys/proc.h>
38 #include <sys/time.h>
39 #include <sys/utsname.h>
40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 #include <sys/ndi_impldefs.h>
42 #include <sys/byteorder.h>
43 #include <sys/fs/dv_node.h>
44 #include <sys/ctype.h>
45 #include <sys/sunmdi.h>
47 #include <sys/fibre-channel/fc.h>
48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 #include <sys/fibre-channel/ulp/fcpvar.h>
52 * Discovery Process
53 * =================
55 * The discovery process is a major function of FCP. In order to help
56 * understand that function a flow diagram is given here. This diagram
57 * doesn't claim to cover all the cases and the events that can occur during
58 * the discovery process nor the subtleties of the code. The code paths shown
59 * are simplified. Its purpose is to help the reader (and potentially bug
60 * fixer) have an overall view of the logic of the code. For that reason the
61 * diagram covers the simple case of the line coming up cleanly or of a new
62 * port attaching to FCP with the link being up. The reader must keep in mind
63 * that:
65 * - There are special cases where bringing devices online and offline
66 * is driven by Ioctl.
68 * - The behavior of the discovery process can be modified through the
69 * .conf file.
71 * - The line can go down and come back up at any time during the
72 * discovery process which explains some of the complexity of the code.
74 * ............................................................................
76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
79 * +-------------------------+
80 * fp/fctl module --->| fcp_port_attach |
81 * +-------------------------+
82 * | |
83 * | |
84 * | v
85 * | +-------------------------+
86 * | | fcp_handle_port_attach |
87 * | +-------------------------+
88 * | |
89 * | |
90 * +--------------------+ |
91 * | |
92 * v v
93 * +-------------------------+
94 * | fcp_statec_callback |
95 * +-------------------------+
96 * |
97 * |
98 * v
99 * +-------------------------+
100 * | fcp_handle_devices |
101 * +-------------------------+
105 * +-------------------------+
106 * | fcp_handle_mapflags |
107 * +-------------------------+
111 * +-------------------------+
112 * | fcp_send_els |
113 * | |
114 * | PLOGI or PRLI To all the|
115 * | reachable devices. |
116 * +-------------------------+
119 * ............................................................................
121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 * STEP 1 are called (it is actually the same function).
125 * +-------------------------+
126 * | fcp_icmd_callback |
127 * fp/fctl module --->| |
128 * | callback for PLOGI and |
129 * | PRLI. |
130 * +-------------------------+
133 * Received PLOGI Accept /-\ Received PRLI Accept
134 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 * | \ / |
136 * | \-/ |
137 * | |
138 * v v
139 * +-------------------------+ +-------------------------+
140 * | fcp_send_els | | fcp_send_scsi |
141 * | | | |
142 * | PRLI | | REPORT_LUN |
143 * +-------------------------+ +-------------------------+
145 * ............................................................................
147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 * (It is actually the same function).
151 * +-------------------------+
152 * fp/fctl module ------->| fcp_scsi_callback |
153 * +-------------------------+
157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 * | \ / |
160 * | \-/ |
161 * | | |
162 * | Receive INQUIRY reply| |
163 * | | |
164 * v v v
165 * +------------------------+ +----------------------+ +----------------------+
166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 * +------------------------+ +----------------------+ +----------------------+
169 * | | |
170 * | | |
171 * | | |
172 * v v |
173 * +-----------------+ +-----------------+ |
174 * | fcp_send_scsi | | fcp_send_scsi | |
175 * | | | | |
176 * | INQUIRY | | INQUIRY PAGE83 | |
177 * | (To each LUN) | +-----------------+ |
178 * +-----------------+ |
181 * +------------------------+
182 * | fcp_call_finish_init |
183 * +------------------------+
186 * +-----------------------------+
187 * | fcp_call_finish_init_held |
188 * +-----------------------------+
191 * All LUNs scanned /-\
192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 * | \ /
194 * | \-/
195 * v |
196 * +------------------+ |
197 * | fcp_finish_tgt | |
198 * +------------------+ |
199 * | Target Not Offline and |
200 * Target Not Offline and | not marked and tgt_node_state |
201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 * | \ / | |
204 * | \-/ | |
205 * v v |
206 * +----------------------------+ +-------------------+ |
207 * | fcp_offline_target | | fcp_create_luns | |
208 * | | +-------------------+ |
209 * | A structure fcp_tgt_elem | | |
210 * | is created and queued in | v |
211 * | the FCP port list | +-------------------+ |
212 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 * | will be unqueued by the | | | |
214 * | watchdog timer. | | Called for each | |
215 * +----------------------------+ | LUN. Dispatches | |
216 * | | fcp_hp_task | |
217 * | +-------------------+ |
218 * | | |
219 * | | |
220 * | | |
221 * | +---------------->|
222 * | |
223 * +---------------------------------------------->|
226 * All the targets (devices) have been scanned /-\
227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 * | \ /
229 * | \-/
230 * +-------------------------------------+ |
231 * | fcp_finish_init | |
232 * | | |
233 * | Signal broadcasts the condition | |
234 * | variable port_config_cv of the FCP | |
235 * | port. One potential code sequence | |
236 * | waiting on the condition variable | |
237 * | the code sequence handling | |
238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 * | The other is in the function | |
240 * | fcp_reconfig_wait which is called | |
241 * | in the transmit path preventing IOs | |
242 * | from going through till the disco- | |
243 * | very process is over. | |
244 * +-------------------------------------+ |
245 * | |
246 * | |
247 * +--------------------------------->|
250 * Return
252 * ............................................................................
254 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
257 * +-------------------------+
258 * | fcp_hp_task |
259 * +-------------------------+
263 * +-------------------------+
264 * | fcp_trigger_lun |
265 * +-------------------------+
269 * Bring offline /-\ Bring online
270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 * | \ / |
272 * | \-/ |
273 * v v
274 * +---------------------+ +-----------------------+
275 * | fcp_offline_child | | fcp_get_cip |
276 * +---------------------+ | |
277 * | Creates a dev_info_t |
278 * | or a mdi_pathinfo_t |
279 * | depending on whether |
280 * | mpxio is on or off. |
281 * +-----------------------+
285 * +-----------------------+
286 * | fcp_online_child |
287 * | |
288 * | Set device online |
289 * | using NDI or MDI. |
290 * +-----------------------+
292 * ............................................................................
294 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
295 * what is described here. We only show the target offline path.
298 * +--------------------------+
299 * | fcp_watch |
300 * +--------------------------+
304 * +--------------------------+
305 * | fcp_scan_offline_tgts |
306 * +--------------------------+
310 * +--------------------------+
311 * | fcp_offline_target_now |
312 * +--------------------------+
316 * +--------------------------+
317 * | fcp_offline_tgt_luns |
318 * +--------------------------+
322 * +--------------------------+
323 * | fcp_offline_lun |
324 * +--------------------------+
328 * +----------------------------------+
329 * | fcp_offline_lun_now |
330 * | |
331 * | A request (or two if mpxio) is |
332 * | sent to the hot plug task using |
333 * | a fcp_hp_elem structure. |
334 * +----------------------------------+
338 * Functions registered with DDI framework
340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 cred_t *credp, int *rval);
348 * Functions registered with FC Transport framework
350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 fc_attach_cmd_t cmd, uint32_t s_id);
352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 fc_detach_cmd_t cmd);
354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 uint32_t claimed);
357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 fc_unsol_buf_t *buf, uint32_t claimed);
359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 fc_unsol_buf_t *buf, uint32_t claimed);
361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 uint32_t dev_cnt, uint32_t port_sid);
366 * Functions registered with SCSA framework
368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 int whom);
380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 void (*callback)(caddr_t), caddr_t arg);
383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 char *name, ddi_eventcookie_t *event_cookiep);
385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 ddi_callback_id_t *cb_id);
388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 ddi_callback_id_t cb_id);
390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 ddi_eventcookie_t eventid, void *impldata);
392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 ddi_bus_config_op_t op, void *arg);
398 * Internal functions
400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 int mode, int *rval);
403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 int mode, int *rval);
405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 struct fcp_scsi_cmd *fscsi, int mode);
407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 caddr_t base_addr, int mode);
409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 int *fc_pkt_reason, int *fc_pkt_action);
414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 static void fcp_handle_devices(struct fcp_port *pptr,
424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 fcp_map_tag_t *map_tag, int cause);
426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 int tgt_cnt, int cause);
429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 int);
509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 int *rval);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
597 * New functions added for mpxio support
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
616 * New functions added for lun masking support
618 static void fcp_read_blacklist(dev_info_t *dip,
619 struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
628 * New functions to support software FCA (like fcoei)
630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 struct scsi_address *ap, struct scsi_pkt *pkt,
632 struct buf *bp, int cmdlen, int statuslen,
633 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 static void fcp_pseudo_destroy_pkt(
635 struct scsi_address *ap, struct scsi_pkt *pkt);
636 static void fcp_pseudo_sync_pkt(
637 struct scsi_address *ap, struct scsi_pkt *pkt);
638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 static void fcp_pseudo_dmafree(
640 struct scsi_address *ap, struct scsi_pkt *pkt);
642 extern struct mod_ops mod_driverops;
644 * This variable is defined in modctl.c and set to '1' after the root driver
645 * and fs are loaded. It serves as an indication that the root filesystem can
646 * be used.
648 extern int modrootloaded;
650 * This table contains strings associated with the SCSI sense key codes. It
651 * is used by FCP to print a clear explanation of the code returned in the
652 * sense information by a device.
654 extern char *sense_keys[];
656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 * under this device that the paths to a physical device are created when
658 * MPxIO is used.
660 extern dev_info_t *scsi_vhci_dip;
663 * Report lun processing
665 #define FCP_LUN_ADDRESSING 0x80
666 #define FCP_PD_ADDRESSING 0x00
667 #define FCP_VOLUME_ADDRESSING 0x40
669 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 #define MAX_INT_DMA 0x7fffffff
672 * Property definitions
674 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 #define TARGET_PROP (char *)fcp_target_prop
677 #define LUN_PROP (char *)fcp_lun_prop
678 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
686 * Short hand macros.
688 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 #define LUN_TGT (plun->lun_tgt)
692 * Driver private macros
694 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
695 ((x) >= 'a' && (x) <= 'f') ? \
696 ((x) - 'a' + 10) : ((x) - 'A' + 10))
698 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
700 #define FCP_N_NDI_EVENTS \
701 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
703 #define FCP_LINK_STATE_CHANGED(p, c) \
704 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
706 #define FCP_TGT_STATE_CHANGED(t, c) \
707 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
709 #define FCP_STATE_CHANGED(p, t, c) \
710 (FCP_TGT_STATE_CHANGED(t, c))
712 #define FCP_MUST_RETRY(fpkt) \
713 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
714 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
715 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
716 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
717 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
718 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
719 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
720 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
722 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
723 ((es)->es_key == KEY_UNIT_ATTENTION && \
724 (es)->es_add_code == 0x3f && \
725 (es)->es_qual_code == 0x0e)
727 #define FCP_SENSE_NO_LUN(es) \
728 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
729 (es)->es_add_code == 0x25 && \
730 (es)->es_qual_code == 0x0)
732 #define FCP_VERSION "20091208-1.192"
733 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
735 #define FCP_NUM_ELEMENTS(array) \
736 (sizeof (array) / sizeof ((array)[0]))
739 * Debugging, Error reporting, and tracing
/*
 * Size (in bytes) of the FCP trace buffer (see fcp_log_size below).
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. `x / FCP_LOG_SIZE` — without parens that would evaluate as
 * `x / 1024 * 1024`).
 */
#define	FCP_LOG_SIZE		(1024 * 1024)
743 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
748 #define FCP_LEVEL_6 0x00020 /* Transport failures */
749 #define FCP_LEVEL_7 0x00040
750 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
751 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
756 * Log contents to system messages file
758 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
770 * Log contents to trace buffer
772 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
784 * Log contents to both system messages file and trace buffer
786 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
787 FC_TRACE_LOG_MSG)
788 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
789 FC_TRACE_LOG_MSG)
790 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
791 FC_TRACE_LOG_MSG)
792 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
793 FC_TRACE_LOG_MSG)
794 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
795 FC_TRACE_LOG_MSG)
796 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
797 FC_TRACE_LOG_MSG)
798 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
799 FC_TRACE_LOG_MSG)
800 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
801 FC_TRACE_LOG_MSG)
802 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
803 FC_TRACE_LOG_MSG)
804 #ifdef DEBUG
805 #define FCP_DTRACE fc_trace_debug
806 #else
807 #define FCP_DTRACE
808 #endif
810 #define FCP_TRACE fc_trace_debug
/*
 * cb_ops: character-device entry points for the fcp control node,
 * hooked into the DDI through fcp_ops.devo_cb_ops below.  Only
 * open, close and ioctl are implemented (fcp_open / fcp_close /
 * fcp_ioctl); every other entry point is stubbed with nodev/nochpoll.
 * NOTE(review): the closing "};" of this initializer is missing from
 * this listing (lines elided by the extraction) — confirm upstream.
 */
812 static struct cb_ops fcp_cb_ops = {
813 fcp_open, /* open */
814 fcp_close, /* close */
815 nodev, /* strategy */
816 nodev, /* print */
817 nodev, /* dump */
818 nodev, /* read */
819 nodev, /* write */
820 fcp_ioctl, /* ioctl */
821 nodev, /* devmap */
822 nodev, /* mmap */
823 nodev, /* segmap */
824 nochpoll, /* chpoll */
825 ddi_prop_op, /* cb_prop_op */
826 0, /* streamtab */
827 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
828 CB_REV, /* rev */
829 nodev, /* aread */
830 nodev /* awrite */
/*
 * Autoconfiguration entry points for the fcp driver.
 *
 * NOTE(review): the devo_refcnt initializer (0) between DEVO_REV and the
 * getinfo entry appears to have been lost in extraction and is restored
 * here -- confirm against the original source.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,			/* refcnt */
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
/* Version string reported by the module and used in modldrv below. */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
/*
 * Registration information handed to the FC transport via fc_ulp_add()
 * in _init().  The transport calls back through the function pointers
 * below for port attach/detach, port ioctls, ELS/data frames and link
 * state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};
/*
 * Default trace mask for the driver (levels 1-7 plus the log mask).
 *
 * NOTE(review): the DEBUG and non-DEBUG definitions below are currently
 * identical, so the #ifdef is redundant as written; presumably kept so
 * the two kernels can diverge later -- confirm before collapsing.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
/* FCP global variables */
int			fcp_bus_config_debug = 0;
static int		fcp_log_size = FCP_LOG_SIZE;
static int		fcp_trace = FCP_TRACE_DEFAULT;
static fc_trace_logq_t	*fcp_logq = NULL;
static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int		fcp_enable_auto_configuration = 1;
static int		fcp_max_bus_config_retries = 4;
static int		fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int		fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not.  This is actually
 *	a counter of the number of Fibre Channel ports that attached.  When
 *	the first port attaches the watchdog is started.  When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter.  It is incremented by
 *	fcp_watchdog_timeout each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	timeout value of the watchdog timer.  The unit is 1 second.  It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value.  The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *	    fcp_watchdog_tick = fcp_watchdog_timeout *
 *		drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int		fcp_watchdog_init = 0;
static int		fcp_watchdog_time = 0;
static int		fcp_watchdog_timeout = 1;
static int		fcp_watchdog_tick;
/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs. The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it. This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;

static void		*fcp_softstate = NULL;	/* for soft state */
static uchar_t		fcp_oflag = FCP_IDLE;	/* open flag */
static kmutex_t		fcp_global_mutex;
static kmutex_t		fcp_ioctl_mutex;
static dev_info_t	*fcp_global_dip = NULL;
static timeout_id_t	fcp_watchdog_id;
/* Property names placed on child device nodes. */
const char		*fcp_lun_prop = "lun";
const char		*fcp_sam_lun_prop = "sam-lun";
const char		*fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char		*fcp_node_wwn_prop = "node-wwn";
const char		*fcp_port_wwn_prop = "port-wwn";
const char		*fcp_conf_wwn_prop = "fc-port-wwn";
const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char		*fcp_manual_config_only = "manual_configuration_only";
const char		*fcp_init_port_prop = "initiator-port";
const char		*fcp_tgt_port_prop = "target-port";
const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Head of the list of attached ports; protected by fcp_global_mutex. */
static struct fcp_port	*fcp_port_head = NULL;
static ddi_eventcookie_t	fcp_insert_eid;
static ddi_eventcookie_t	fcp_remove_eid;

/* Hotplug event definitions registered with the NDI event framework. */
static ndi_event_definition_t	fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};
/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};

/*
 * this is used to dummy up a report lun response for cases
 * where the target doesn't support it
 */
static uchar_t fcp_dummy_lun[] = {
	0x00,		/* MSB length (length = no of luns * 8) */
	0x00,
	0x00,
	0x08,		/* LSB length */
	0x00,		/* MSB reserved */
	0x00,
	0x00,
	0x00,		/* LSB reserved */
	FCP_PD_ADDRESSING,
	0x00,		/* LUN is ZERO at the first level */
	0x00,
	0x00,		/* second level is zero */
	0x00,
	0x00,		/* third level is zero */
	0x00,
	0x00		/* fourth level is zero */
};
/*
 * Translation table from an 8-bit arbitrated-loop physical address (AL_PA)
 * to the corresponding loop index; entries that are not valid AL_PAs map
 * to 0x00.
 *
 * NOTE(review): the extraction truncated the tail of this table; the final
 * sixteen entries (indices 240-255, all invalid AL_PAs) are restored here
 * as zeros -- confirm against the original source.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/*
 * Product id compared against the symmetric-disk table (VICOM SVE box).
 * NOTE(review): blank padding inside this literal may have been collapsed
 * by extraction -- confirm the original field width.
 */
static caddr_t pid = "SESS01 ";
/*
 * Lint/static-analysis annotations documenting the locking scheme for
 * the globals above.
 */
_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))
/*
 * This table is used to determine whether or not it's safe to copy in
 * the target node name for a lun.  Since all luns behind the same target
 * have the same wwnn, only targets that do not support multiple luns are
 * eligible to be enumerated under mpxio if they aren't page83 compliant.
 *
 * NOTE(review): the vendor/product fields in these literals are normally
 * blank-padded; internal spacing may have been collapsed by extraction --
 * confirm against the original source.
 */
char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM DDYFT",
	"SUNW SUNWGS",	/* Daktari enclosure */
	"SUN SENA",	/* SES device */
	"SUN SESS01"	/* VICOM SVE box */
};

int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
/*
 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
 * will panic if you don't pass this in to the routine, this information.
 * Need to determine what the actual impact to the system is by providing
 * this information if any. Since dma allocation is done in pkt_init it may
 * not have any impact. These values are straight from the Writing Device
 * Driver manual.
 */
static ddi_dma_attr_t pseudo_fca_dma_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low address */
	0xffffffff,	/* high address */
	0x00ffffff,	/* counter upper bound */
	1,		/* alignment requirements */
	0x3f,		/* burst sizes */
	1,		/* minimum DMA access */
	0xffffffff,	/* maximum DMA access */
	(1 << 24) - 1,	/* segment boundary restrictions */
	1,		/* scatter/gather list length */
	512,		/* device granularity */
	0		/* DMA flags */
};
/*
 * The _init(9e) return value should be that of mod_install(9f). Under
 * some circumstances, a failure may not be related to mod_install(9f) and
 * one would then require a return value to indicate the failure. Looking
 * at mod_install(9f), it is expected to return 0 for success and non-zero
 * for failure. mod_install(9f) for device drivers further goes down the
 * calling chain and ends up in ddi_installdrv(), whose return values are
 * DDI_SUCCESS and DDI_FAILURE - there are also other functions in the
 * calling chain of mod_install(9f) which return values like EINVAL and
 * some even return -1.
 *
 * To work around the vagaries of the mod_install() calling chain, return
 * either 0 or ENODEV depending on the success or failure of mod_install().
 */
1128 _init(void)
1130 int rval;
1133 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1134 * before registering with the transport first.
1136 if (ddi_soft_state_init(&fcp_softstate,
1137 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1138 return (EINVAL);
1141 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1142 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1144 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1145 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1146 mutex_destroy(&fcp_global_mutex);
1147 mutex_destroy(&fcp_ioctl_mutex);
1148 ddi_soft_state_fini(&fcp_softstate);
1149 return (ENODEV);
1152 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1154 if ((rval = mod_install(&modlinkage)) != 0) {
1155 fc_trace_free_logq(fcp_logq);
1156 (void) fc_ulp_remove(&fcp_modinfo);
1157 mutex_destroy(&fcp_global_mutex);
1158 mutex_destroy(&fcp_ioctl_mutex);
1159 ddi_soft_state_fini(&fcp_softstate);
1160 rval = ENODEV;
1163 return (rval);
1168 * the system is done with us as a driver, so clean up
1171 _fini(void)
1173 int rval;
1176 * don't start cleaning up until we know that the module remove
1177 * has worked -- if this works, then we know that each instance
1178 * has successfully been DDI_DETACHed
1180 if ((rval = mod_remove(&modlinkage)) != 0) {
1181 return (rval);
1184 (void) fc_ulp_remove(&fcp_modinfo);
1186 ddi_soft_state_fini(&fcp_softstate);
1187 mutex_destroy(&fcp_global_mutex);
1188 mutex_destroy(&fcp_ioctl_mutex);
1189 fc_trace_free_logq(fcp_logq);
1191 return (rval);
1196 _info(struct modinfo *modinfop)
1198 return (mod_info(&modlinkage, modinfop));
1203 * attach the module
1205 static int
1206 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1208 int rval = DDI_SUCCESS;
1210 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1211 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1213 if (cmd == DDI_ATTACH) {
1214 /* The FCP pseudo device is created here. */
1215 mutex_enter(&fcp_global_mutex);
1216 fcp_global_dip = devi;
1217 mutex_exit(&fcp_global_mutex);
1219 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1220 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1221 ddi_report_dev(fcp_global_dip);
1222 } else {
1223 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1224 mutex_enter(&fcp_global_mutex);
1225 fcp_global_dip = NULL;
1226 mutex_exit(&fcp_global_mutex);
1228 rval = DDI_FAILURE;
1231 * We check the fcp_offline_delay property at this
1232 * point. This variable is global for the driver,
1233 * not specific to an instance.
1235 * We do not recommend setting the value to less
1236 * than 10 seconds (RA_TOV_els), or greater than
1237 * 60 seconds.
1239 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1240 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1241 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1242 if ((fcp_offline_delay < 10) ||
1243 (fcp_offline_delay > 60)) {
1244 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1245 "to %d second(s). This is outside the "
1246 "recommended range of 10..60 seconds.",
1247 fcp_offline_delay);
1251 return (rval);
1255 /*ARGSUSED*/
1256 static int
1257 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1259 int res = DDI_SUCCESS;
1261 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1262 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1264 if (cmd == DDI_DETACH) {
1266 * Check if there are active ports/threads. If there
1267 * are any, we will fail, else we will succeed (there
1268 * should not be much to clean up)
1270 mutex_enter(&fcp_global_mutex);
1271 FCP_DTRACE(fcp_logq, "fcp",
1272 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1273 (void *) fcp_port_head);
1275 if (fcp_port_head == NULL) {
1276 ddi_remove_minor_node(fcp_global_dip, NULL);
1277 fcp_global_dip = NULL;
1278 mutex_exit(&fcp_global_mutex);
1279 } else {
1280 mutex_exit(&fcp_global_mutex);
1281 res = DDI_FAILURE;
1284 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1285 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1287 return (res);
1291 /* ARGSUSED */
1292 static int
1293 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1295 if (otype != OTYP_CHR) {
1296 return (EINVAL);
1300 * Allow only root to talk;
1302 if (drv_priv(credp)) {
1303 return (EPERM);
1306 mutex_enter(&fcp_global_mutex);
1307 if (fcp_oflag & FCP_EXCL) {
1308 mutex_exit(&fcp_global_mutex);
1309 return (EBUSY);
1312 if (flag & FEXCL) {
1313 if (fcp_oflag & FCP_OPEN) {
1314 mutex_exit(&fcp_global_mutex);
1315 return (EBUSY);
1317 fcp_oflag |= FCP_EXCL;
1319 fcp_oflag |= FCP_OPEN;
1320 mutex_exit(&fcp_global_mutex);
1322 return (0);
1326 /* ARGSUSED */
1327 static int
1328 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1330 if (otype != OTYP_CHR) {
1331 return (EINVAL);
1334 mutex_enter(&fcp_global_mutex);
1335 if (!(fcp_oflag & FCP_OPEN)) {
1336 mutex_exit(&fcp_global_mutex);
1337 return (ENODEV);
1339 fcp_oflag = FCP_IDLE;
1340 mutex_exit(&fcp_global_mutex);
1342 return (0);
1347 * fcp_ioctl
1348 * Entry point for the FCP ioctls
1350 * Input:
1351 * See ioctl(9E)
1353 * Output:
1354 * See ioctl(9E)
1356 * Returns:
1357 * See ioctl(9E)
1359 * Context:
1360 * Kernel context.
1362 /* ARGSUSED */
1363 static int
1364 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1365 int *rval)
1367 int ret = 0;
1369 mutex_enter(&fcp_global_mutex);
1370 if (!(fcp_oflag & FCP_OPEN)) {
1371 mutex_exit(&fcp_global_mutex);
1372 return (ENXIO);
1374 mutex_exit(&fcp_global_mutex);
1376 switch (cmd) {
1377 case FCP_TGT_INQUIRY:
1378 case FCP_TGT_CREATE:
1379 case FCP_TGT_DELETE:
1380 ret = fcp_setup_device_data_ioctl(cmd,
1381 (struct fcp_ioctl *)data, mode, rval);
1382 break;
1384 case FCP_TGT_SEND_SCSI:
1385 mutex_enter(&fcp_ioctl_mutex);
1386 ret = fcp_setup_scsi_ioctl(
1387 (struct fcp_scsi_cmd *)data, mode, rval);
1388 mutex_exit(&fcp_ioctl_mutex);
1389 break;
1391 case FCP_STATE_COUNT:
1392 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1393 mode, rval);
1394 break;
1395 case FCP_GET_TARGET_MAPPINGS:
1396 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1397 mode, rval);
1398 break;
1399 default:
1400 fcp_log(CE_WARN, NULL,
1401 "!Invalid ioctl opcode = 0x%x", cmd);
1402 ret = EINVAL;
1405 return (ret);
1410 * fcp_setup_device_data_ioctl
1411 * Setup handler for the "device data" style of
1412 * ioctl for FCP. See "fcp_util.h" for data structure
1413 * definition.
1415 * Input:
1416 * cmd = FCP ioctl command
1417 * data = ioctl data
1418 * mode = See ioctl(9E)
1420 * Output:
1421 * data = ioctl data
1422 * rval = return value - see ioctl(9E)
1424 * Returns:
1425 * See ioctl(9E)
1427 * Context:
1428 * Kernel context.
1430 /* ARGSUSED */
1431 static int
1432 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1433 int *rval)
1435 struct fcp_port *pptr;
1436 struct device_data *dev_data;
1437 uint32_t link_cnt;
1438 la_wwn_t *wwn_ptr = NULL;
1439 struct fcp_tgt *ptgt = NULL;
1440 struct fcp_lun *plun = NULL;
1441 int i, error;
1442 struct fcp_ioctl fioctl;
1444 #ifdef _MULTI_DATAMODEL
1445 switch (ddi_model_convert_from(mode & FMODELS)) {
1446 case DDI_MODEL_ILP32: {
1447 struct fcp32_ioctl f32_ioctl;
1449 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1450 sizeof (struct fcp32_ioctl), mode)) {
1451 return (EFAULT);
1453 fioctl.fp_minor = f32_ioctl.fp_minor;
1454 fioctl.listlen = f32_ioctl.listlen;
1455 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1456 break;
1458 case DDI_MODEL_NONE:
1459 if (ddi_copyin((void *)data, (void *)&fioctl,
1460 sizeof (struct fcp_ioctl), mode)) {
1461 return (EFAULT);
1463 break;
1466 #else /* _MULTI_DATAMODEL */
1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1468 sizeof (struct fcp_ioctl), mode)) {
1469 return (EFAULT);
1471 #endif /* _MULTI_DATAMODEL */
1474 * Right now we can assume that the minor number matches with
1475 * this instance of fp. If this changes we will need to
1476 * revisit this logic.
1478 mutex_enter(&fcp_global_mutex);
1479 pptr = fcp_port_head;
1480 while (pptr) {
1481 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1482 break;
1483 } else {
1484 pptr = pptr->port_next;
1487 mutex_exit(&fcp_global_mutex);
1488 if (pptr == NULL) {
1489 return (ENXIO);
1491 mutex_enter(&pptr->port_mutex);
1494 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1495 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1496 mutex_exit(&pptr->port_mutex);
1497 return (ENOMEM);
1500 if (ddi_copyin(fioctl.list, dev_data,
1501 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1502 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1503 mutex_exit(&pptr->port_mutex);
1504 return (EFAULT);
1506 link_cnt = pptr->port_link_cnt;
1508 if (cmd == FCP_TGT_INQUIRY) {
1509 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1510 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1511 sizeof (wwn_ptr->raw_wwn)) == 0) {
1512 /* This ioctl is requesting INQ info of local HBA */
1513 mutex_exit(&pptr->port_mutex);
1514 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1515 dev_data[0].dev_status = 0;
1516 if (ddi_copyout(dev_data, fioctl.list,
1517 (sizeof (struct device_data)) * fioctl.listlen,
1518 mode)) {
1519 kmem_free(dev_data,
1520 sizeof (*dev_data) * fioctl.listlen);
1521 return (EFAULT);
1523 kmem_free(dev_data,
1524 sizeof (*dev_data) * fioctl.listlen);
1525 #ifdef _MULTI_DATAMODEL
1526 switch (ddi_model_convert_from(mode & FMODELS)) {
1527 case DDI_MODEL_ILP32: {
1528 struct fcp32_ioctl f32_ioctl;
1529 f32_ioctl.fp_minor = fioctl.fp_minor;
1530 f32_ioctl.listlen = fioctl.listlen;
1531 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1532 if (ddi_copyout((void *)&f32_ioctl,
1533 (void *)data,
1534 sizeof (struct fcp32_ioctl), mode)) {
1535 return (EFAULT);
1537 break;
1539 case DDI_MODEL_NONE:
1540 if (ddi_copyout((void *)&fioctl, (void *)data,
1541 sizeof (struct fcp_ioctl), mode)) {
1542 return (EFAULT);
1544 break;
1546 #else /* _MULTI_DATAMODEL */
1547 if (ddi_copyout((void *)&fioctl, (void *)data,
1548 sizeof (struct fcp_ioctl), mode)) {
1549 return (EFAULT);
1551 #endif /* _MULTI_DATAMODEL */
1552 return (0);
1556 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1557 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1558 mutex_exit(&pptr->port_mutex);
1559 return (ENXIO);
1562 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1563 i++) {
1564 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1566 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 dev_data[i].dev_status = ENXIO;
1571 if ((ptgt = fcp_lookup_target(pptr,
1572 (uchar_t *)wwn_ptr)) == NULL) {
1573 mutex_exit(&pptr->port_mutex);
1574 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1575 wwn_ptr, &error, 0) == NULL) {
1576 dev_data[i].dev_status = ENODEV;
1577 mutex_enter(&pptr->port_mutex);
1578 continue;
1579 } else {
1581 dev_data[i].dev_status = EAGAIN;
1583 mutex_enter(&pptr->port_mutex);
1584 continue;
1586 } else {
1587 mutex_enter(&ptgt->tgt_mutex);
1588 if (ptgt->tgt_state & (FCP_TGT_MARK |
1589 FCP_TGT_BUSY)) {
1590 dev_data[i].dev_status = EAGAIN;
1591 mutex_exit(&ptgt->tgt_mutex);
1592 continue;
1595 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1596 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1597 dev_data[i].dev_status = ENOTSUP;
1598 } else {
1599 dev_data[i].dev_status = ENXIO;
1601 mutex_exit(&ptgt->tgt_mutex);
1602 continue;
1605 switch (cmd) {
1606 case FCP_TGT_INQUIRY:
1608 * The reason we give device type of
1609 * lun 0 only even though in some
1610 * cases(like maxstrat) lun 0 device
1611 * type may be 0x3f(invalid) is that
1612 * for bridge boxes target will appear
1613 * as luns and the first lun could be
1614 * a device that utility may not care
1615 * about (like a tape device).
1617 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1618 dev_data[i].dev_status = 0;
1619 mutex_exit(&ptgt->tgt_mutex);
1621 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1622 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1623 } else {
1624 dev_data[i].dev0_type = plun->lun_type;
1626 mutex_enter(&ptgt->tgt_mutex);
1627 break;
1629 case FCP_TGT_CREATE:
1630 mutex_exit(&ptgt->tgt_mutex);
1631 mutex_exit(&pptr->port_mutex);
1634 * serialize state change call backs.
1635 * only one call back will be handled
1636 * at a time.
1638 mutex_enter(&fcp_global_mutex);
1639 if (fcp_oflag & FCP_BUSY) {
1640 mutex_exit(&fcp_global_mutex);
1641 if (dev_data) {
1642 kmem_free(dev_data,
1643 sizeof (*dev_data) *
1644 fioctl.listlen);
1646 return (EBUSY);
1648 fcp_oflag |= FCP_BUSY;
1649 mutex_exit(&fcp_global_mutex);
1651 dev_data[i].dev_status =
1652 fcp_create_on_demand(pptr,
1653 wwn_ptr->raw_wwn);
1655 if (dev_data[i].dev_status != 0) {
1656 char buf[25];
1658 for (i = 0; i < FC_WWN_SIZE; i++) {
1659 (void) sprintf(&buf[i << 1],
1660 "%02x",
1661 wwn_ptr->raw_wwn[i]);
1664 fcp_log(CE_WARN, pptr->port_dip,
1665 "!Failed to create nodes for"
1666 " pwwn=%s; error=%x", buf,
1667 dev_data[i].dev_status);
1670 /* allow state change call backs again */
1671 mutex_enter(&fcp_global_mutex);
1672 fcp_oflag &= ~FCP_BUSY;
1673 mutex_exit(&fcp_global_mutex);
1675 mutex_enter(&pptr->port_mutex);
1676 mutex_enter(&ptgt->tgt_mutex);
1678 break;
1680 case FCP_TGT_DELETE:
1681 break;
1683 default:
1684 fcp_log(CE_WARN, pptr->port_dip,
1685 "!Invalid device data ioctl "
1686 "opcode = 0x%x", cmd);
1688 mutex_exit(&ptgt->tgt_mutex);
1691 mutex_exit(&pptr->port_mutex);
1693 if (ddi_copyout(dev_data, fioctl.list,
1694 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1695 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1696 return (EFAULT);
1698 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1700 #ifdef _MULTI_DATAMODEL
1701 switch (ddi_model_convert_from(mode & FMODELS)) {
1702 case DDI_MODEL_ILP32: {
1703 struct fcp32_ioctl f32_ioctl;
1705 f32_ioctl.fp_minor = fioctl.fp_minor;
1706 f32_ioctl.listlen = fioctl.listlen;
1707 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1708 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1709 sizeof (struct fcp32_ioctl), mode)) {
1710 return (EFAULT);
1712 break;
1714 case DDI_MODEL_NONE:
1715 if (ddi_copyout((void *)&fioctl, (void *)data,
1716 sizeof (struct fcp_ioctl), mode)) {
1717 return (EFAULT);
1719 break;
1721 #else /* _MULTI_DATAMODEL */
1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1724 sizeof (struct fcp_ioctl), mode)) {
1725 return (EFAULT);
1727 #endif /* _MULTI_DATAMODEL */
1729 return (0);
/*
 * Fetch the target mappings (path, etc.) for all LUNs
 * on this port.
 *
 * NOTE(review): mappingSize (fioctl.listlen) is user-controlled and is
 * passed unbounded to kmem_zalloc(KM_SLEEP); also, KM_SLEEP allocations
 * never return NULL, so the ENOMEM check below is dead code -- confirm
 * before changing either.
 */
/* ARGSUSED */
static int
fcp_get_target_mappings(struct fcp_ioctl *data,
    int mode, int *rval)
{
	struct fcp_port		*pptr;
	fc_hba_target_mappings_t	*mappings;
	fc_hba_mapping_entry_t	*map;
	struct fcp_tgt		*ptgt = NULL;
	struct fcp_lun		*plun = NULL;
	int			i, mapIndex, mappingSize;
	int			listlen;
	struct fcp_ioctl	fioctl;
	char			*path;
	fcp_ent_addr_t		sam_lun_addr;

#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl.fp_minor = f32_ioctl.fp_minor;
		fioctl.listlen = f32_ioctl.listlen;
		fioctl.list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)&fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)&fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	pptr = fcp_port_head;
	while (pptr) {
		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
			break;
		} else {
			pptr = pptr->port_next;
		}
	}
	mutex_exit(&fcp_global_mutex);
	if (pptr == NULL) {
		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
		    fioctl.fp_minor);
		return (ENXIO);
	}

	/* We use listlen to show the total buffer size */
	mappingSize = fioctl.listlen;

	/* Now calculate how many mapping entries will fit */
	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
	    - sizeof (fc_hba_target_mappings_t);
	if (listlen <= 0) {
		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
		return (ENXIO);
	}
	listlen = listlen / sizeof (fc_hba_mapping_entry_t);

	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;

	/* Now get to work */
	mapIndex = 0;

	mutex_enter(&pptr->port_mutex);
	/* Loop through all targets on this port */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {

			mutex_enter(&ptgt->tgt_mutex);

			/* Loop through all LUNs on this target */
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_state & FCP_LUN_OFFLINE) {
					continue;
				}

				path = fcp_get_lun_path(plun);
				if (path == NULL) {
					continue;
				}

				/*
				 * Buffer full: keep counting (so numLuns
				 * reports the total needed) but don't
				 * store the entry.
				 */
				if (mapIndex >= listlen) {
					mapIndex ++;
					kmem_free(path, MAXPATHLEN);
					continue;
				}
				map = &mappings->entries[mapIndex++];
				bcopy(path, map->targetDriver,
				    sizeof (map->targetDriver));
				map->d_id = ptgt->tgt_d_id;
				map->busNumber = 0;
				map->targetNumber = ptgt->tgt_d_id;
				map->osLUN = plun->lun_num;

				/*
				 * We had swapped lun when we stored it in
				 * lun_addr. We need to swap it back before
				 * returning it to user land
				 */
				sam_lun_addr.ent_addr_0 =
				    BE_16(plun->lun_addr.ent_addr_0);
				sam_lun_addr.ent_addr_1 =
				    BE_16(plun->lun_addr.ent_addr_1);
				sam_lun_addr.ent_addr_2 =
				    BE_16(plun->lun_addr.ent_addr_2);
				sam_lun_addr.ent_addr_3 =
				    BE_16(plun->lun_addr.ent_addr_3);

				bcopy(&sam_lun_addr, &map->samLUN,
				    FCP_LUN_SIZE);
				bcopy(ptgt->tgt_node_wwn.raw_wwn,
				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
				bcopy(ptgt->tgt_port_wwn.raw_wwn,
				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));

				if (plun->lun_guid) {

					/* convert ascii wwn to bytes */
					fcp_ascii_to_wwn(plun->lun_guid,
					    map->guid, sizeof (map->guid));

					if ((sizeof (map->guid)) <
					    plun->lun_guid_size / 2) {
						cmn_err(CE_WARN,
						    "fcp_get_target_mappings:"
						    "guid copy space "
						    "insufficient."
						    "Copy Truncation - "
						    "available %d; need %d",
						    (int)sizeof (map->guid),
						    (int)
						    plun->lun_guid_size / 2);
					}
				}
				kmem_free(path, MAXPATHLEN);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	mutex_exit(&pptr->port_mutex);
	mappings->numLuns = mapIndex;

	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
		kmem_free(mappings, mappingSize);
		return (EFAULT);
	}
	kmem_free(mappings, mappingSize);

#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
1944 * fcp_setup_scsi_ioctl
1945 * Setup handler for the "scsi passthru" style of
1946 * ioctl for FCP. See "fcp_util.h" for data structure
1947 * definition.
1949 * Input:
1950 * u_fscsi = ioctl data (user address space)
1951 * mode = See ioctl(9E)
1953 * Output:
1954 * u_fscsi = ioctl data (user address space)
1955 * rval = return value - see ioctl(9E)
1957 * Returns:
1958 * 0 = OK
1959 * EAGAIN = See errno.h
1960 * EBUSY = See errno.h
1961 * EFAULT = See errno.h
1962 * EINTR = See errno.h
1963 * EINVAL = See errno.h
1964 * EIO = See errno.h
1965 * ENOMEM = See errno.h
1966 * ENXIO = See errno.h
1968 * Context:
1969 * Kernel context.
/* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret		= 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr	= NULL;
	caddr_t			k_bufaddr	= NULL;
	caddr_t			k_rqbufaddr	= NULL;
	caddr_t			u_cdbbufaddr;
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}

	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 *
	 * NOTE(review): the three lengths are user-supplied and only
	 * bounded below here.  scsi_cdblen is bounded above later
	 * (FCP_CDB_SIZE check in fcp_send_scsi_ioctl()), but
	 * scsi_buflen/scsi_rqlen are limited only by the KM_NOSLEEP
	 * allocations below failing -- confirm that is the intended
	 * guard against oversized requests.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space.
	 *
	 * u_cdbbufaddr/u_bufaddr/u_rqbufaddr are initialized only on
	 * this (ret == 0) path; every later use of them is likewise
	 * guarded by ret == 0, so they are never read uninitialized.
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses, since the whole structure
		 * is copied back out to the caller below.
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space.
	 * A copyout failure overrides any earlier success, but an
	 * earlier error is preserved over a successful copyout.
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}
2124 * fcp_copyin_scsi_cmd
2125 * Copy in fcp_scsi_cmd data structure from user address space.
2126 * The data may be in 32 bit or 64 bit modes.
2128 * Input:
2129 * base_addr = from address (user address space)
2130 * mode = See ioctl(9E) and ddi_copyin(9F)
2132 * Output:
2133 * fscsi = to address (kernel address space)
2135 * Returns:
2136 * 0 = OK
2137 * EFAULT = Error
2139 * Context:
2140 * Kernel context.
2142 static int
2143 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2145 #ifdef _MULTI_DATAMODEL
2146 struct fcp32_scsi_cmd f32scsi;
2148 switch (ddi_model_convert_from(mode & FMODELS)) {
2149 case DDI_MODEL_ILP32:
2151 * Copy data from user address space
2153 if (ddi_copyin((void *)base_addr,
2154 &f32scsi,
2155 sizeof (struct fcp32_scsi_cmd),
2156 mode)) {
2157 return (EFAULT);
2160 * Convert from 32 bit to 64 bit
2162 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2163 break;
2164 case DDI_MODEL_NONE:
2166 * Copy data from user address space
2168 if (ddi_copyin((void *)base_addr,
2169 fscsi,
2170 sizeof (struct fcp_scsi_cmd),
2171 mode)) {
2172 return (EFAULT);
2174 break;
2176 #else /* _MULTI_DATAMODEL */
2178 * Copy data from user address space
2180 if (ddi_copyin((void *)base_addr,
2181 fscsi,
2182 sizeof (struct fcp_scsi_cmd),
2183 mode)) {
2184 return (EFAULT);
2186 #endif /* _MULTI_DATAMODEL */
2188 return (0);
2193 * fcp_copyout_scsi_cmd
2194 * Copy out fcp_scsi_cmd data structure to user address space.
2195 * The data may be in 32 bit or 64 bit modes.
2197 * Input:
2198 * fscsi = to address (kernel address space)
2199 * mode = See ioctl(9E) and ddi_copyin(9F)
2201 * Output:
2202 * base_addr = from address (user address space)
2204 * Returns:
2205 * 0 = OK
2206 * EFAULT = Error
2208 * Context:
2209 * Kernel context.
2211 static int
2212 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2214 #ifdef _MULTI_DATAMODEL
2215 struct fcp32_scsi_cmd f32scsi;
2217 switch (ddi_model_convert_from(mode & FMODELS)) {
2218 case DDI_MODEL_ILP32:
2220 * Convert from 64 bit to 32 bit
2222 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2224 * Copy data to user address space
2226 if (ddi_copyout(&f32scsi,
2227 (void *)base_addr,
2228 sizeof (struct fcp32_scsi_cmd),
2229 mode)) {
2230 return (EFAULT);
2232 break;
2233 case DDI_MODEL_NONE:
2235 * Copy data to user address space
2237 if (ddi_copyout(fscsi,
2238 (void *)base_addr,
2239 sizeof (struct fcp_scsi_cmd),
2240 mode)) {
2241 return (EFAULT);
2243 break;
2245 #else /* _MULTI_DATAMODEL */
2247 * Copy data to user address space
2249 if (ddi_copyout(fscsi,
2250 (void *)base_addr,
2251 sizeof (struct fcp_scsi_cmd),
2252 mode)) {
2253 return (EFAULT);
2255 #endif /* _MULTI_DATAMODEL */
2257 return (0);
2262 * fcp_send_scsi_ioctl
2263 * Sends the SCSI command in blocking mode.
2265 * Input:
2266 * fscsi = SCSI command data structure
2268 * Output:
2269 * fscsi = SCSI command data structure
2271 * Returns:
2272 * 0 = OK
2273 * EAGAIN = See errno.h
2274 * EBUSY = See errno.h
2275 * EINTR = See errno.h
2276 * EINVAL = See errno.h
2277 * EIO = See errno.h
2278 * ENOMEM = See errno.h
2279 * ENXIO = See errno.h
2281 * Context:
2282 * Kernel context.
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun		= NULL;
	struct fcp_port	*pptr		= NULL;
	struct fcp_tgt	*ptgt		= NULL;
	fc_packet_t	*fpkt		= NULL;
	struct fcp_ipkt	*icmd		= NULL;
	int		target_created	= FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command: the opcode (first CDB byte) must be
	 * one of the whitelisted commands in scsi_ioctl_list[].
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs: only read-style passthru is supported, and the
	 * CDB must fit the FCP command CDB field.
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}

	/*
	 * Find FC port
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found, try to create it
				 * on the fly (PLOGI/PRLI are issued by
				 * fcp_port_create_tgt()).
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target; snapshot the
					 * link/change counts used later to
					 * detect state changes across the
					 * unlocked window.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 *
		 * NOTE(review): on the path where pptr was found but
		 * fc_ulp_busy_port() failed, fcp_global_mutex was already
		 * dropped above and is not reacquired, yet it is released
		 * again here -- looks like a double-exit; confirm whether
		 * fc_ulp_busy_port() can actually fail on this path.
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command.
		 * With DMA the command is staged in a local struct and
		 * copied out via FCP_CP_OUT below; without DMA it is
		 * built directly in the packet's command buffer.
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun		= plun;
		icmd->ipkt_restart	= 0;
		icmd->ipkt_retries	= 0;
		icmd->ipkt_opcode	= 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id	= pptr->port_id;
		hp->d_id	= ptgt->tgt_d_id;
		hp->r_ctl	= R_CTL_COMMAND;
		hp->type	= FC_TYPE_SCSI_FCP;
		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd	= 0;
		hp->seq_id	= 0;
		hp->seq_cnt	= 0;
		hp->ox_id	= 0xffff;
		hp->rx_id	= 0xffff;
		hp->ro		= 0;

		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data	= 0;
		fcmd->fcp_data_len	= fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}
	}

	/*
	 * Send SCSI command to FC transport, unless the target state
	 * changed since the link/change counts were snapshotted.
	 */
	if (ret == 0) {
		mutex_enter(&ptgt->tgt_mutex);

		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			fscsi->scsi_fc_status = xport_retval =
			    fc_ulp_transport(pptr->port_fp_handle,
			    fpkt);
			if (fscsi->scsi_fc_status != FC_SUCCESS) {
				ret = EIO;
			}
		} else {
			mutex_exit(&ptgt->tgt_mutex);
			ret = EBUSY;
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			/*
			 * reconfig_status is only computed (and, via
			 * short-circuit below, only read) when
			 * reconfig_lun is FALSE.
			 */
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date.  Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;

					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid	= 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode	= 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state	= fpkt->pkt_state;
			fscsi->scsi_pkt_action	= fpkt->pkt_action;
			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order.  No special copy is necessary.
			 *
			 * NOTE(review): when no sense data was returned,
			 * sense_len is 0 but rsp_sense is uninitialized;
			 * bcopy() then copies zero bytes from an
			 * indeterminate pointer -- harmless in practice,
			 * but worth confirming.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 *
	 * NOTE(review): both branches below perform the identical
	 * de-busy operation; the target_created distinction appears
	 * vestigial.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}
/*
 * Decide, from a completed REPORT_LUN response, whether the set of LUNs
 * the target reports differs from what we currently track for it —
 * i.e. whether LUN rediscovery should be kicked off.  Blacklisted
 * (masked) LUNs are excluded from the comparison.  Returns TRUE/FALSE.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t			*lun_string;
	uint16_t		lun_num, i;
	int			num_luns;
	int			actual_luns;
	int			num_masked_luns;
	int			lun_buflen;
	struct fcp_lun		*plun	= NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t			reconfig_needed = FALSE;
	uint8_t			lun_exists = FALSE;
	fcp_port_t		*pptr = ptgt->tgt_port;

	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	/* Pull the REPORT_LUN payload out of the packet's data buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			/* Top two bits of byte 0 select the addressing. */
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check.  If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about.  If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* Masked LUNs are treated as "known". */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
2981 * This function is called by fcp_handle_page83 and uses inquiry response data
2982 * stored in plun->lun_inq to determine whether or not a device is a member of
2983 * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2984 * otherwise 1.
2986 static int
2987 fcp_symmetric_device_probe(struct fcp_lun *plun)
2989 struct scsi_inquiry *stdinq = &plun->lun_inq;
2990 char *devidptr;
2991 int i, len;
2993 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2994 devidptr = fcp_symmetric_disk_table[i];
2995 len = (int)strlen(devidptr);
2997 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2998 return (0);
3001 return (1);
3006 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3007 * It basically returns the current count of # of state change callbacks
3008 * i.e the value of tgt_change_cnt.
3010 * INPUT:
3011 * fcp_ioctl.fp_minor -> The minor # of the fp port
3012 * fcp_ioctl.listlen -> 1
3013 * fcp_ioctl.list -> Pointer to a 32 bit integer
/*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port		*pptr = NULL;

	/* Copy in the ioctl arguments and resolve the port instance. */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The caller must supply exactly one uint32_t output slot. */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there are nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Snapshot the count under the port lock, then drop it. */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	/* The count goes to the user buffer named by fioctl.list. */
	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

	/*
	 * Copy the (unchanged) argument structure back out in the
	 * caller's data model.
	 */
#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
3094 * This function copies the fcp_ioctl structure passed in from user land
3095 * into kernel land. Handles 32 bit applications.
3097 /*ARGSUSED*/
3098 static int
3099 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3100 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3102 struct fcp_port *t_pptr;
3104 #ifdef _MULTI_DATAMODEL
3105 switch (ddi_model_convert_from(mode & FMODELS)) {
3106 case DDI_MODEL_ILP32: {
3107 struct fcp32_ioctl f32_ioctl;
3109 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3110 sizeof (struct fcp32_ioctl), mode)) {
3111 return (EFAULT);
3113 fioctl->fp_minor = f32_ioctl.fp_minor;
3114 fioctl->listlen = f32_ioctl.listlen;
3115 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3116 break;
3118 case DDI_MODEL_NONE:
3119 if (ddi_copyin((void *)data, (void *)fioctl,
3120 sizeof (struct fcp_ioctl), mode)) {
3121 return (EFAULT);
3123 break;
3126 #else /* _MULTI_DATAMODEL */
3127 if (ddi_copyin((void *)data, (void *)fioctl,
3128 sizeof (struct fcp_ioctl), mode)) {
3129 return (EFAULT);
3131 #endif /* _MULTI_DATAMODEL */
3134 * Right now we can assume that the minor number matches with
3135 * this instance of fp. If this changes we will need to
3136 * revisit this logic.
3138 mutex_enter(&fcp_global_mutex);
3139 t_pptr = fcp_port_head;
3140 while (t_pptr) {
3141 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3142 break;
3143 } else {
3144 t_pptr = t_pptr->port_next;
3147 *pptr = t_pptr;
3148 mutex_exit(&fcp_global_mutex);
3149 if (t_pptr == NULL) {
3150 return (ENXIO);
3153 return (0);
3157 * Function: fcp_port_create_tgt
3159 * Description: As the name suggest this function creates the target context
3160 * specified by the the WWN provided by the caller. If the
3161 * creation goes well and the target is known by fp/fctl a PLOGI
3162 * followed by a PRLI are issued.
3164 * Argument: pptr fcp port structure
3165 * pwwn WWN of the target
3166 * ret_val Address of the return code. It could be:
3167 * EIO, ENOMEM or 0.
3168 * fc_status PLOGI or PRLI status completion
3169 * fc_pkt_state PLOGI or PRLI state completion
3170 * fc_pkt_reason PLOGI or PRLI reason completion
3171 * fc_pkt_action PLOGI or PRLI action completion
3173 * Return Value: NULL if it failed
3174 * Target structure address if it succeeds
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags.
	 * NOTE(review): this is done even when the lookups above failed,
	 * in which case devlist is otherwise uninitialized; harmless as
	 * devlist is not used on the error path, but confirm.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the port map entry. */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt	= 1;
			ptgt->tgt_d_id		= devlist.map_did.port_id;
			ptgt->tgt_hard_addr	=
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle	= devlist.map_pd;
			ptgt->tgt_fca_dev	= NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.
	 * The caller holds fcp_global_mutex; it is dropped across the
	 * blocking login calls and reacquired before returning.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3253 * Function: fcp_tgt_send_plogi
3255 * Description: This function sends a PLOGI to the target specified by the
3256 * caller and waits till it completes.
3258 * Argument: ptgt Target to send the plogi to.
3259 * fc_status Status returned by fp/fctl in the PLOGI request.
3260 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3261 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3262 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3264 * Return Value: 0
3265 * ENOMEM
3266 * EIO
3268 * Context: User context.
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int		tcount;
	int		lcount;
	int		ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun		= NULL;
		icmd->ipkt_restart	= 0;
		icmd->ipkt_retries	= 0;
		icmd->ipkt_opcode	= LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id	= pptr->port_id;	/* source ID */
		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl	= R_CTL_ELS_REQ;
		hp->type	= FC_TYPE_EXTENDED_LS;
		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id	= 0;
		hp->rsvd	= 0;
		hp->df_ctl	= 0;
		hp->seq_cnt	= 0;
		hp->ox_id	= 0xffff;		/* i.e. none */
		hp->rx_id	= 0xffff;		/* i.e. none */
		hp->ro		= 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion only if fc_ulp_login() was issued
	 * successfully -- the completion callback posts the semaphore.
	 * The fc_pkt_* out-parameters are only written on that path.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state	= fpkt->pkt_state;
		*fc_pkt_reason	= fpkt->pkt_reason;
		*fc_pkt_action	= fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures whenever icmd was alloc-ed.
	 * On the success path the semaphore wait above has already
	 * consumed the completion callback, so teardown here happens in
	 * the allocating thread.
	 *
	 * NOTE(review): an earlier comment claimed cleanup only occurs
	 * when fc_ulp_login() failed, but the code cleans up
	 * unconditionally for any allocated icmd -- confirm
	 * fcp_ipkt_sema_cleanup() is safe in both cases.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3382 * Function: fcp_tgt_send_prli
3384 * Description: Does nothing as of today.
3386 * Argument: ptgt Target to send the prli to.
3387 * fc_status Status returned by fp/fctl in the PRLI request.
3388 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3389 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3390 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3392 * Return Value: 0
3394 /*ARGSUSED*/
3395 static int
3396 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3397 int *fc_pkt_reason, int *fc_pkt_action)
3399 return (0);
3403 * Function: fcp_ipkt_sema_init
3405 * Description: Initializes the semaphore contained in the internal packet.
3407 * Argument: icmd Internal packet the semaphore of which must be
3408 * initialized.
3410 * Return Value: None
3412 * Context: User context only.
3414 static void
3415 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3417 struct fc_packet *fpkt;
3419 fpkt = icmd->ipkt_fpkt;
3421 /* Create semaphore for sync */
3422 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3424 /* Setup the completion callback */
3425 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3429 * Function: fcp_ipkt_sema_wait
3431 * Description: Wait on the semaphore embedded in the internal packet. The
3432 * semaphore is released in the callback.
3434 * Argument: icmd Internal packet to wait on for completion.
3436 * Return Value: 0
3437 * EIO
3438 * EBUSY
3439 * EAGAIN
3441 * Context: User context only.
3443 * This function does a conversion between the field pkt_state of the fc_packet
3444 * embedded in the internal packet (icmd) and the code it returns.
3446 static int
3447 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3449 struct fc_packet *fpkt;
3450 int ret;
3452 ret = EIO;
3453 fpkt = icmd->ipkt_fpkt;
3456 * Wait on semaphore
3458 sema_p(&(icmd->ipkt_sema));
3461 * Check the status of the FC packet
3463 switch (fpkt->pkt_state) {
3464 case FC_PKT_SUCCESS:
3465 ret = 0;
3466 break;
3467 case FC_PKT_LOCAL_RJT:
3468 switch (fpkt->pkt_reason) {
3469 case FC_REASON_SEQ_TIMEOUT:
3470 case FC_REASON_RX_BUF_TIMEOUT:
3471 ret = EAGAIN;
3472 break;
3473 case FC_REASON_PKT_BUSY:
3474 ret = EBUSY;
3475 break;
3477 break;
3478 case FC_PKT_TIMEOUT:
3479 ret = EAGAIN;
3480 break;
3481 case FC_PKT_LOCAL_BSY:
3482 case FC_PKT_TRAN_BSY:
3483 case FC_PKT_NPORT_BSY:
3484 case FC_PKT_FABRIC_BSY:
3485 ret = EBUSY;
3486 break;
3487 case FC_PKT_LS_RJT:
3488 case FC_PKT_BA_RJT:
3489 switch (fpkt->pkt_reason) {
3490 case FC_REASON_LOGICAL_BSY:
3491 ret = EBUSY;
3492 break;
3494 break;
3495 case FC_PKT_FS_RJT:
3496 switch (fpkt->pkt_reason) {
3497 case FC_REASON_FS_LOGICAL_BUSY:
3498 ret = EBUSY;
3499 break;
3501 break;
3504 return (ret);
3508 * Function: fcp_ipkt_sema_callback
3510 * Description: Registered as the completion callback function for the FC
3511 * transport when the ipkt semaphore is used for sync. This will
3512 * cleanup the used data structures, if necessary and wake up
3513 * the user thread to complete the transaction.
3515 * Argument: fpkt FC packet (points to the icmd)
3517 * Return Value: None
3519 * Context: User context only
3521 static void
3522 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3524 struct fcp_ipkt *icmd;
3526 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 * Wake up user thread
3531 sema_v(&(icmd->ipkt_sema));
3535 * Function: fcp_ipkt_sema_cleanup
3537 * Description: Called to cleanup (if necessary) the data structures used
3538 * when ipkt sema is used for sync. This function will detect
3539 * whether the caller is the last thread (via counter) and
3540 * cleanup only if necessary.
3542 * Argument: icmd Internal command packet
3544 * Return Value: None
3546 * Context: User context only
3548 static void
3549 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3551 struct fcp_tgt *ptgt;
3552 struct fcp_port *pptr;
3554 ptgt = icmd->ipkt_tgt;
3555 pptr = icmd->ipkt_port;
3558 * Acquire data structure
3560 mutex_enter(&ptgt->tgt_mutex);
3563 * Destroy semaphore
3565 sema_destroy(&(icmd->ipkt_sema));
3568 * Cleanup internal packet
3570 mutex_exit(&ptgt->tgt_mutex);
3571 fcp_icmd_free(pptr, icmd);
3575 * Function: fcp_port_attach
3577 * Description: Called by the transport framework to resume, suspend or
3578 * attach a new port.
3580 * Argument: ulph Port handle
3581 * *pinfo Port information
3582 * cmd Command
3583 * s_id Port ID
3585 * Return Value: FC_FAILURE or FC_SUCCESS
3587 /*ARGSUSED*/
3588 static int
3589 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3590 fc_attach_cmd_t cmd, uint32_t s_id)
3592 int instance;
3593 int res = FC_FAILURE; /* default result */
3595 ASSERT(pinfo != NULL);
3597 instance = ddi_get_instance(pinfo->port_dip);
3599 switch (cmd) {
3600 case FC_CMD_ATTACH:
3602 * this port instance attaching for the first time (or after
3603 * being detached before)
3605 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3606 instance) == DDI_SUCCESS) {
3607 res = FC_SUCCESS;
3608 } else {
3609 ASSERT(ddi_get_soft_state(fcp_softstate,
3610 instance) == NULL);
3612 break;
3614 case FC_CMD_RESUME:
3615 case FC_CMD_POWER_UP:
3617 * this port instance was attached and the suspended and
3618 * will now be resumed
3620 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3621 instance) == DDI_SUCCESS) {
3622 res = FC_SUCCESS;
3624 break;
3626 default:
3627 /* shouldn't happen */
3628 FCP_TRACE(fcp_logq, "fcp",
3629 fcp_trace, FCP_BUF_LEVEL_2, 0,
3630 "port_attach: unknown cmdcommand: %d", cmd);
3631 break;
3634 /* return result */
3635 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3636 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3638 return (res);
3643 * detach or suspend this port instance
3645 * acquires and releases the global mutex
3647 * acquires and releases the mutex for this port
3649 * acquires and releases the hotplug mutex for this port
3651 /*ARGSUSED*/
3652 static int
3653 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3654 fc_detach_cmd_t cmd)
3656 int flag;
3657 int instance;
3658 struct fcp_port *pptr;
3660 instance = ddi_get_instance(info->port_dip);
3661 pptr = ddi_get_soft_state(fcp_softstate, instance);
3663 switch (cmd) {
3664 case FC_CMD_SUSPEND:
3665 FCP_DTRACE(fcp_logq, "fcp",
3666 fcp_trace, FCP_BUF_LEVEL_8, 0,
3667 "port suspend called for port %d", instance);
3668 flag = FCP_STATE_SUSPENDED;
3669 break;
3671 case FC_CMD_POWER_DOWN:
3672 FCP_DTRACE(fcp_logq, "fcp",
3673 fcp_trace, FCP_BUF_LEVEL_8, 0,
3674 "port power down called for port %d", instance);
3675 flag = FCP_STATE_POWER_DOWN;
3676 break;
3678 case FC_CMD_DETACH:
3679 FCP_DTRACE(fcp_logq, "fcp",
3680 fcp_trace, FCP_BUF_LEVEL_8, 0,
3681 "port detach called for port %d", instance);
3682 flag = FCP_STATE_DETACHING;
3683 break;
3685 default:
3686 /* shouldn't happen */
3687 return (FC_FAILURE);
3689 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3690 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3692 return (fcp_handle_port_detach(pptr, flag, instance));
/*
 * called for ioctls on the transport's devctl interface, and the transport
 * has passed it to us
 *
 * this will only be called for device control ioctls (i.e. hotplugging stuff)
 *
 * return FC_SUCCESS if we decide to claim the ioctl,
 * else return FC_UNCLAIMED
 *
 * *rval is set iff we decide to claim the ioctl
 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
	int			retval = FC_UNCLAIMED;	/* return value */
	struct fcp_port		*pptr = NULL;		/* our soft state */
	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
	dev_info_t		*cdip;
	mdi_pathinfo_t		*pip = NULL;
	char			*ndi_nm;		/* NDI name */
	char			*ndi_addr;		/* NDI addr */
	int			is_mpxio, circ;
	int			devi_entered = 0;	/* 1 while we hold devi enter */
	clock_t			end_time;

	ASSERT(rval != NULL);

	FCP_DTRACE(fcp_logq, "fcp",
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

	/* if already claimed then forget it */
	if (claimed) {
		/*
		 * for now, if this ioctl has already been claimed, then
		 * we just ignore it
		 */
		return (retval);
	}

	/* get our port info */
	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		/* NOTE(review): duplicated word "handle handle" in message */
		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle handle in ioctl");
		*rval = ENXIO;
		return (retval);
	}
	is_mpxio = pptr->port_mpxio;

	/*
	 * First switch: validate the command and set up the devctl handle
	 * (dcp); device-level commands additionally resolve the child dip
	 * (cdip) or pathinfo (pip) and take the devi enter lock.
	 */
	switch (cmd) {
	case DEVCTL_BUS_GETSTATE:
	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:

	case DEVCTL_BUS_DEV_CREATE:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}
		break;

	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_RESET:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}

		ASSERT(dcp != NULL);

		/* ensure we have a name and address */
		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ioctl: can't get name (%s) or addr (%s)",
			    ndi_nm ? ndi_nm : "<null ptr>",
			    ndi_addr ? ndi_addr : "<null ptr>");
			ndi_dc_freehdl(dcp);
			return (retval);
		}

		/* get our child's DIP */
		ASSERT(pptr != NULL);
		if (is_mpxio) {
			mdi_devi_enter(pptr->port_dip, &circ);
		} else {
			ndi_devi_enter(pptr->port_dip, &circ);
		}
		/* from here on "goto out" must balance the devi enter */
		devi_entered = 1;

		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
		    ndi_addr)) == NULL) {
			/* Look for virtually enumerated devices. */
			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
			if (pip == NULL ||
			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
				*rval = ENXIO;
				goto out;
			}
		}
		break;

	default:
		*rval = ENOTTY;
		return (retval);
	}

	/* this ioctl is ours -- process it */

	retval = FC_SUCCESS;		/* just means we claim the ioctl */

	/* we assume it will be a success; else we'll set error value */
	*rval = 0;


	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "ioctl: claiming this one");

	/* handle ioctls now */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
		ASSERT(cdip != NULL);
		ASSERT(dcp != NULL);
		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_OFFLINE: {
		int			flag = 0;
		int			lcount;
		int			tcount;
		struct fcp_pkt		*head = NULL;
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);
		int			all = 1;	/* all LUNs offline? */
		struct fcp_lun		*tplun;
		struct fcp_tgt		*ptgt;

		ASSERT(pptr != NULL);
		ASSERT(cdip != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}

		/* abort any commands still queued against this LUN */
		head = fcp_scan_commands(plun);
		if (head != NULL) {
			fcp_abort_commands(head, LUN_PORT);
		}
		/* snapshot counters so the hotplug task can detect staleness */
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		if (cmd == DEVCTL_DEVICE_REMOVE) {
			flag = NDI_DEVI_REMOVE;
		}

		/* drop the devi lock before blocking in the hotplug task */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_OFFLINE, lcount, tcount, flag);

		if (*rval != NDI_SUCCESS) {
			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
			break;
		}

		fcp_update_offline_flags(plun);

		/* check whether every LUN of the target is now offline */
		ptgt = plun->lun_tgt;
		mutex_enter(&ptgt->tgt_mutex);
		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
		    tplun->lun_next) {
			mutex_enter(&tplun->lun_mutex);
			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
				all = 0;
			}
			mutex_exit(&tplun->lun_mutex);
		}

		if (all) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
			/*
			 * The user is unconfiguring/offlining the device.
			 * If fabric and the auto configuration is set
			 * then make sure the user is the only one who
			 * can reconfigure the device.
			 */
			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
			    fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 1;
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
		break;
	}

	case DEVCTL_DEVICE_ONLINE: {
		int			lcount;
		int			tcount;
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		/*
		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * from the scsi_probe()).
		 */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state |= FCP_LUN_ONLINING;
		mutex_exit(&LUN_TGT->tgt_mutex);

		/* drop the devi lock before blocking in the hotplug task */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_ONLINE, lcount, tcount, 0);

		if (*rval != NDI_SUCCESS) {
			/* Reset the FCP_LUN_ONLINING bit */
			mutex_enter(&LUN_TGT->tgt_mutex);
			plun->lun_state &= ~FCP_LUN_ONLINING;
			mutex_exit(&LUN_TGT->tgt_mutex);
			*rval = EIO;
			break;
		}
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
		    FCP_LUN_ONLINING);
		mutex_exit(&LUN_TGT->tgt_mutex);
		break;
	}

	case DEVCTL_BUS_DEV_CREATE: {
		uchar_t			*bytes = NULL;
		uint_t			nbytes;
		struct fcp_tgt		*ptgt = NULL;
		struct fcp_lun		*plun = NULL;
		dev_info_t		*useless_dip = NULL;

		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
		    DEVCTL_CONSTRUCT, &useless_dip);
		if (*rval != 0 || useless_dip == NULL) {
			break;
		}

		/* the node must carry a PORT_WWN_PROP of FC_WWN_SIZE bytes */
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
			*rval = EINVAL;
			(void) ndi_devi_free(useless_dip);
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			break;
		}

		*rval = fcp_create_on_demand(pptr, bytes);
		if (*rval == 0) {
			mutex_enter(&pptr->port_mutex);
			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
			if (ptgt) {
				/*
				 * We now have a pointer to the target that
				 * was created. Lets point to the first LUN on
				 * this new target.
				 */
				mutex_enter(&ptgt->tgt_mutex);

				plun = ptgt->tgt_lun;
				/*
				 * There may be stale/offline LUN entries on
				 * this list (this is by design) and so we have
				 * to make sure we point to the first online
				 * LUN
				 */
				while (plun &&
				    plun->lun_state & FCP_LUN_OFFLINE) {
					plun = plun->lun_next;
				}

				mutex_exit(&ptgt->tgt_mutex);
			}
			mutex_exit(&pptr->port_mutex);
		}

		if (*rval == 0 && ptgt && plun) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * Allow up to fcp_lun_ready_retry seconds to
			 * configure all the luns behind the target.
			 *
			 * The intent here is to allow targets with long
			 * reboot/reset-recovery times to become available
			 * while limiting the maximum wait time for an
			 * unresponsive target.
			 */
			end_time = ddi_get_lbolt() +
			    SEC_TO_TICK(fcp_lun_ready_retry);

			while (ddi_get_lbolt() < end_time) {
				retval = FC_SUCCESS;

				/*
				 * The new ndi interfaces for on-demand creation
				 * are inflexible, Do some more work to pass on
				 * a path name of some LUN (design is broken !)
				 */
				if (plun->lun_cip) {
					if (plun->lun_mpxio == 0) {
						cdip = DIP(plun->lun_cip);
					} else {
						cdip = mdi_pi_get_client(
						    PIP(plun->lun_cip));
					}
					if (cdip == NULL) {
						*rval = ENXIO;
						break;
					}

					if (!i_ddi_devi_attached(cdip)) {
						/* not ready yet -- poll */
						mutex_exit(&plun->lun_mutex);
						ddi_sleep(1);
						mutex_enter(&plun->lun_mutex);
					} else {
						/*
						 * This Lun is ready, lets
						 * check the next one.
						 */
						mutex_exit(&plun->lun_mutex);
						plun = plun->lun_next;
						while (plun && (plun->lun_state
						    & FCP_LUN_OFFLINE)) {
							plun = plun->lun_next;
						}
						if (!plun) {
							break;
						}
						mutex_enter(&plun->lun_mutex);
					}
				} else {
					/*
					 * lun_cip field for a valid lun
					 * should never be NULL. Fail the
					 * command.
					 */
					*rval = ENXIO;
					break;
				}
			}
			if (plun) {
				/* timed out or failed with a LUN in hand */
				mutex_exit(&plun->lun_mutex);
			} else {
				/* all LUNs attached -- report a device path */
				char devnm[MAXNAMELEN];
				int nmlen;

				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
				    ddi_node_name(cdip),
				    ddi_get_name_addr(cdip));

				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
				    0) {
					*rval = EFAULT;
				}
			}
		} else {
			int	i;
			char	buf[25];

			/* render the WWN as hex for the failure message */
			for (i = 0; i < FC_WWN_SIZE; i++) {
				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
			}

			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to create nodes for pwwn=%s; error=%x",
			    buf, *rval);
		}

		(void) ndi_devi_free(useless_dip);
		ddi_prop_free(bytes);
		break;
	}

	case DEVCTL_DEVICE_RESET: {
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);
		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		mutex_exit(&pptr->port_mutex);

		/* the LUN must be initialized and have a scsi_device */
		mutex_enter(&plun->lun_tgt->tgt_mutex);
		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}

		if (plun->lun_sd == NULL) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}
		mutex_exit(&plun->lun_tgt->tgt_mutex);

		/*
		 * set up ap so that fcp_reset can figure out
		 * which target to reset
		 */
		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
		    RESET_TARGET) == FALSE) {
			*rval = EIO;
		}
		break;
	}

	case DEVCTL_BUS_GETSTATE:
		ASSERT(dcp != NULL);
		ASSERT(pptr != NULL);
		ASSERT(pptr->port_dip != NULL);
		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
		    NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		/* claimed but not supported */
		*rval = ENOTSUP;
		break;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		ASSERT(pptr != NULL);
		(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
		break;

	default:
		ASSERT(dcp != NULL);
		*rval = ENOTTY;
		break;
	}

	/* all done -- clean up and return */
out:	if (devi_entered) {
		/* balance the mdi/ndi devi enter taken above */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
	}

	if (dcp != NULL) {
		ndi_dc_freehdl(dcp);
	}

	return (retval);
}
4209 /*ARGSUSED*/
4210 static int
4211 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4212 uint32_t claimed)
4214 uchar_t r_ctl;
4215 uchar_t ls_code;
4216 struct fcp_port *pptr;
4218 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4219 return (FC_UNCLAIMED);
4222 mutex_enter(&pptr->port_mutex);
4223 if (pptr->port_state & (FCP_STATE_DETACHING |
4224 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4225 mutex_exit(&pptr->port_mutex);
4226 return (FC_UNCLAIMED);
4228 mutex_exit(&pptr->port_mutex);
4230 r_ctl = buf->ub_frame.r_ctl;
4232 switch (r_ctl & R_CTL_ROUTING) {
4233 case R_CTL_EXTENDED_SVC:
4234 if (r_ctl == R_CTL_ELS_REQ) {
4235 ls_code = buf->ub_buffer[0];
4237 switch (ls_code) {
4238 case LA_ELS_PRLI:
4240 * We really don't care if something fails.
4241 * If the PRLI was not sent out, then the
4242 * other end will time it out.
4244 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4245 return (FC_SUCCESS);
4247 return (FC_UNCLAIMED);
4248 /* NOTREACHED */
4250 default:
4251 break;
4254 /* FALLTHROUGH */
4256 default:
4257 return (FC_UNCLAIMED);
4262 /*ARGSUSED*/
4263 static int
4264 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4265 uint32_t claimed)
4267 return (FC_UNCLAIMED);
/*
 *     Function: fcp_statec_callback
 *
 *  Description: The purpose of this function is to handle a port state change.
 *		 It is called from fp/fctl and, in a few instances, internally.
 *
 *     Argument: ulph		fp/fctl port handle
 *		 port_handle	fcp_port structure
 *		 port_state	Physical state of the port
 *		 port_top	Topology
 *		 *devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		 dev_cnt	Number of entries pointed by devlist.
 *		 port_sid	Port ID of the local port.
 *
 * Return Value: None
 */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t		link_count;
	int			map_len = 0;
	struct fcp_port		*pptr;
	fcp_map_tag_t		*map_tag = NULL;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;			/* nothing to work with! */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/*
	 * If a thread is in detach, don't do anything.
	 */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
	 * init_pkt is called, it knows whether or not the target's status
	 * (or pd) might be changing.
	 */

	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	/*
	 * the transport doesn't allocate or probe unless being
	 * asked to by either the applications or ULPs
	 *
	 * in cases where the port is OFFLINE at the time of port
	 * attach callback and the link comes ONLINE later, for
	 * easier automatic node creation (i.e. without you having to
	 * go out and run the utility to perform LOGINs) the
	 * following conditional is helpful
	 */
	pptr->port_phys_state = port_state;

	/*
	 * Allocate one change-count tag per map entry. The port mutex is
	 * dropped around the allocation (KM_NOSLEEP under mutex anyway);
	 * on failure the state change is abandoned entirely.
	 */
	if (dev_cnt) {
		mutex_exit(&pptr->port_mutex);

		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);

			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);

			return;
		}

		mutex_enter(&pptr->port_mutex);
	}

	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		/*
		 * The local port changed ID. It is the first time a port ID
		 * is assigned or something drastic happened. We might have
		 * been unplugged and replugged on another loop or fabric port
		 * or somebody grabbed the AL_PA we had or somebody rezoned
		 * the fabric we were plugged into.
		 */
		pptr->port_id = port_sid;
	}

	/* port_mutex is held on entry to every case; each case drops it */
	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		/*
		 * link has gone from online to offline -- just update the
		 * state of this port to BUSY and MARKed to go offline
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * We were offline a while ago and this one
			 * seems to indicate that the loop has gone
			 * dead forever.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		/*
		 * link has gone from offline to online
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");

		pptr->port_link_cnt++;

		/* drain outstanding internal packets before proceeding */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			ddi_sleep(1);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_topology = port_top;

		/*
		 * The state of the targets and luns accessible through this
		 * port is updated.
		 */
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);

		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;

		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		if (!dev_cnt) {
			/*
			 * We go directly to the online state if no remote
			 * ports were discovered.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");

			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}

		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:

			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}

			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;

		default:
			/*
			 * We got here because we were provided with an unknown
			 * topology.
			 */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}

			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");

		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);

		mutex_exit(&pptr->port_mutex);

		break;

	case FC_STATE_RESET:
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * We come here when an application has requested
		 * Dynamic node creation/deletion in Fabric connectivity.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			/*
			 * This case can happen when the FCTL is in the
			 * process of giving us on online and the host on
			 * the other side issues a PLOGI/PLOGO. Ideally
			 * the state changes should be serialized unless
			 * they are opposite (online-offline).
			 * The transport will give us a final state change
			 * so we can ignore this for the time being.
			 */
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}

		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/*
		 * Extend the deadline under steady state conditions
		 * to provide more time for the device-change-commands
		 */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}

		/*
		 * There is another race condition here, where if we were
		 * in ONLINEING state and a devices in the map logs out,
		 * fp will give another state change as DEVICE_CHANGE
		 * and OLD. This will result in that target being offlined.
		 * The pd_handle is freed. If from the first statec callback
		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with invalid pd_handle.
		 * The fix is to check for the link_cnt before issuing
		 * any command down.
		 */
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

		link_count = pptr->port_link_cnt;

		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/* Do nothing else */
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}
4592 * Function: fcp_handle_devices
4594 * Description: This function updates the devices currently known by
4595 * walking the list provided by the caller. The list passed
4596 * by the caller is supposed to be the list of reachable
4597 * devices.
4599 * Argument: *pptr Fcp port structure.
4600 * *devlist Pointer to the first entry of a table
4601 * containing the remote ports that can be
4602 * reached.
4603 * dev_cnt Number of entries pointed by devlist.
4604 * link_cnt Link state count.
4605 * *map_tag Array of fcp_map_tag_t structures.
4606 * cause What caused this function to be called.
4608 * Return Value: None
4610 * Notes: The pptr->port_mutex must be held.
4612 static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int i;
	int check_finish_init = 0;
	fc_portmap_t *map_entry;
	struct fcp_tgt *ptgt = NULL;

	/*
	 * Reconciles the list of remote ports reported by the transport
	 * (devlist) with the targets this local port already knows about:
	 * pass 1 tags the already-known targets, pass 2 offlines untagged
	 * (unreachable) targets on a link change, pass 3 dispatches on the
	 * map_type of each entry.
	 *
	 * NOTE(review): the PORT_DEVICE_OLD branch below drops and
	 * reacquires pptr->port_mutex, so the caller is presumed to hold
	 * it on entry -- confirm against callers.
	 */
	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable). It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name. If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table). If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr). It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached. For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		/*
		 * Deferred "finish init" for the previous entry, done here
		 * so that each switch arm below only has to bump
		 * check_finish_init.
		 */
		if (check_finish_init) {
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known. The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			/* kicks off PLOGI/PRLI (or REPORT LUNS) as needed */
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						ddi_sleep(1);
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/* finish-init for the last entry processed, or offline everything */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
/*
 * Function: fcp_handle_reportlun_changed
 *
 * Description: Prepares LUN 0 of the target (allocating it if it does not
 *		exist yet) and sends a REPORT LUNS command to rediscover the
 *		target's LUN inventory.
 *
 * Argument:	*ptgt		Target whose LUN configuration changed.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed (allocation or send error).
 *		 FALSE	REPORT LUNS was sent; discovery continues in the
 *			SCSI completion callback.
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun *plun;
	struct fcp_port *pptr;
	int rscn_count;
	int lun0_newalloc;
	int ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/* LUN 0 becomes the single busy/marked LUN until discovery ends */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;
	}

	return (ret);
}
/*
 * Function: fcp_handle_mapflags
 *
 * Description: This function creates a target structure if the ptgt passed
 *		is NULL. It also kicks off the PLOGI if we are not logged
 *		into the target yet or the PRLI if we are logged into the
 *		target already. The rest of the treatment is done in the
 *		callbacks of the PLOGI or PRLI.
 *
 * Argument:	*pptr		FCP Port structure.
 *		*ptgt		Target structure.
 *		*map_entry	Array of fc_portmap_t structures.
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed
 *		 FALSE	Succeeded
 *
 * Notes: pptr->port_mutex must be owned.
 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int lcount;
	int tcount;
	int ret = TRUE;
	int alloc;
	struct fcp_ipkt *icmd;
	struct fcp_lun *pseq_lun = NULL;
	uchar_t opcode;
	int valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For an unchanged, already-logged-in target that was passed in,
	 * a non-offline tape LUN forces a target reset/mark instead of
	 * rediscovery (sequential devices cannot tolerate a silent relogin).
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* fcp_send_els only frees internally-allocated packets */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
/*
 * Function: fcp_send_els
 *
 * Description: Sends an ELS to the target specified by the caller. Supports
 *		PLOGI and PRLI. If no internal packet is supplied, one is
 *		allocated here and freed on failure.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target to send the ELS to.
 *		*icmd		Internal packet (may be NULL).
 *		opcode		ELS opcode (LA_ELS_PLOGI or LA_ELS_PRLI).
 *		lcount		Link state change counter
 *		tcount		Target state change counter
 *		cause		What caused the call
 *
 * Return Value: DDI_SUCCESS	ELS handed to the transport.
 *		 Others		Failure; a caller-supplied icmd is NOT freed.
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t *fpkt;
	fc_frame_hdr_t *hp;
	int internal = 0;	/* nonzero when icmd was allocated here */
	int alloc;
	int cmd_len;
	int resp_len;
	int res = DDI_FAILURE; /* default result */
	int rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/* bail out if link/target state changed since icmd alloc */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function only if the ltct ULP is loaded */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5354 * called internally update the state of all of the tgts and each LUN
5355 * for this port (i.e. each target known to be attached to this port)
5356 * if they are not already offline
5358 * must be called with the port mutex owned
5360 * acquires and releases the target mutexes for each target attached
5361 * to this port
5363 void
5364 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5366 int i;
5367 struct fcp_tgt *ptgt;
5369 ASSERT(mutex_owned(&pptr->port_mutex));
5371 for (i = 0; i < FCP_NUM_HASH; i++) {
5372 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5373 ptgt = ptgt->tgt_next) {
5374 mutex_enter(&ptgt->tgt_mutex);
5375 fcp_update_tgt_state(ptgt, FCP_SET, state);
5376 ptgt->tgt_change_cnt++;
5377 ptgt->tgt_statec_cause = cause;
5378 ptgt->tgt_tmp_cnt = 1;
5379 ptgt->tgt_done = 0;
5380 mutex_exit(&ptgt->tgt_mutex);
5386 static void
5387 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5389 int i;
5390 int ndevs;
5391 struct fcp_tgt *ptgt;
5393 ASSERT(mutex_owned(&pptr->port_mutex));
5395 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5396 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5397 ptgt = ptgt->tgt_next) {
5398 ndevs++;
5402 if (ndevs == 0) {
5403 return;
5405 pptr->port_tmp_cnt = ndevs;
5407 for (i = 0; i < FCP_NUM_HASH; i++) {
5408 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5409 ptgt = ptgt->tgt_next) {
5410 (void) fcp_call_finish_init_held(pptr, ptgt,
5411 lcount, ptgt->tgt_change_cnt, cause);
5417 * Function: fcp_update_tgt_state
5419 * Description: This function updates the field tgt_state of a target. That
5420 * field is a bitmap and which bit can be set or reset
5421 * individually. The action applied to the target state is also
5422 * applied to all the LUNs belonging to the target (provided the
5423 * LUN is not offline). A side effect of applying the state
5424 * modification to the target and the LUNs is the field tgt_trace
5425 * of the target and lun_trace of the LUNs is set to zero.
5428 * Argument: *ptgt Target structure.
5429 * flag Flag indication what action to apply (set/reset).
5430 * state State bits to update.
5432 * Return Value: None
5434 * Context: Interrupt, Kernel or User context.
5435 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5436 * calling this function.
5438 void
5439 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5441 struct fcp_lun *plun;
5443 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5445 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5446 /* The target is not offline. */
5447 if (flag == FCP_SET) {
5448 ptgt->tgt_state |= state;
5449 ptgt->tgt_trace = 0;
5450 } else {
5451 ptgt->tgt_state &= ~state;
5454 for (plun = ptgt->tgt_lun; plun != NULL;
5455 plun = plun->lun_next) {
5456 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5457 /* The LUN is not offline. */
5458 if (flag == FCP_SET) {
5459 plun->lun_state |= state;
5460 plun->lun_trace = 0;
5461 } else {
5462 plun->lun_state &= ~state;
5470 * Function: fcp_update_tgt_state
5472 * Description: This function updates the field lun_state of a LUN. That
5473 * field is a bitmap and which bit can be set or reset
5474 * individually.
5476 * Argument: *plun LUN structure.
5477 * flag Flag indication what action to apply (set/reset).
5478 * state State bits to update.
5480 * Return Value: None
5482 * Context: Interrupt, Kernel or User context.
5483 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5484 * calling this function.
5486 void
5487 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5489 struct fcp_tgt *ptgt = plun->lun_tgt;
5491 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5493 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5494 if (flag == FCP_SET) {
5495 plun->lun_state |= state;
5496 } else {
5497 plun->lun_state &= ~state;
5503 * Function: fcp_get_port
5505 * Description: This function returns the fcp_port structure from the opaque
5506 * handle passed by the caller. That opaque handle is the handle
5507 * used by fp/fctl to identify a particular local port. That
5508 * handle has been stored in the corresponding fcp_port
5509 * structure. This function is going to walk the global list of
5510 * fcp_port structures till one has a port_fp_handle that matches
5511 * the handle passed by the caller. This function enters the
5512 * mutex fcp_global_mutex while walking the global list and then
5513 * releases it.
5515 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5516 * particular port.
5518 * Return Value: NULL Not found.
5519 * Not NULL Pointer to the fcp_port structure.
5521 * Context: Interrupt, Kernel or User context.
5523 static struct fcp_port *
5524 fcp_get_port(opaque_t port_handle)
5526 struct fcp_port *pptr;
5528 ASSERT(port_handle != NULL);
5530 mutex_enter(&fcp_global_mutex);
5531 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5532 if (pptr->port_fp_handle == port_handle) {
5533 break;
5536 mutex_exit(&fcp_global_mutex);
5538 return (pptr);
5542 static void
5543 fcp_unsol_callback(fc_packet_t *fpkt)
5545 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5546 struct fcp_port *pptr = icmd->ipkt_port;
5548 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5549 caddr_t state, reason, action, expln;
5551 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5552 &action, &expln);
5554 fcp_log(CE_WARN, pptr->port_dip,
5555 "!couldn't post response to unsolicited request: "
5556 " state=%s reason=%s rx_id=%x ox_id=%x",
5557 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5558 fpkt->pkt_cmd_fhdr.rx_id);
5560 fcp_icmd_free(pptr, icmd);
5565 * Perform general purpose preparation of a response to an unsolicited request
5567 static void
5568 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5569 uchar_t r_ctl, uchar_t type)
5571 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5572 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5573 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5574 pkt->pkt_cmd_fhdr.type = type;
5575 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5576 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5577 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5578 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5579 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5580 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5581 pkt->pkt_cmd_fhdr.ro = 0;
5582 pkt->pkt_cmd_fhdr.rsvd = 0;
5583 pkt->pkt_comp = fcp_unsol_callback;
5584 pkt->pkt_pd = NULL;
5585 pkt->pkt_ub_resp_token = (opaque_t)buf;
/*
 * Respond to an unsolicited PRLI from a remote port: build an LS_ACC
 * payload carrying our service parameters (echoing the originator's
 * process-associator fields) and issue it as an outbound ELS.  On
 * success the unsolicited buffer is released back to the transport.
 * Returns FC_SUCCESS or FC_FAILURE.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t *fpkt;
	struct la_els_prli prli;
	struct fcp_prli *fprli;
	struct fcp_ipkt *icmd;
	struct la_els_prli *from;
	struct fcp_prli *orig;
	struct fcp_tgt *ptgt;
	int tcount = 0;
	int lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;
	/* snapshot state counters so stale responses can be detected */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* advertise target function only if the ltct ULP is loaded */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/* transient transport state: queue for retry */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		/* link changed under us; drop the response */
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5701 * Function: fcp_icmd_alloc
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
5704 * field is initialized to fcp_icmd_callback. Sometimes it is
5705 * modified by the caller (such as fcp_send_scsi). The
5706 * structure is also tied to the state of the line and of the
5707 * target at a particular time. That link is established by
5708 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5709 * and tcount which came respectively from pptr->link_cnt and
5710 * ptgt->tgt_change_cnt.
5712 * Argument: *pptr Fcp port.
5713 * *ptgt Target (destination of the command).
5714 * cmd_len Length of the command.
5715 * resp_len Length of the expected response.
5716 * data_len Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5719 * lcount Link state change counter.
5720 * tcount Target state change counter.
 *		cause		Reason that led to this call.
5723 * Return Value: NULL Failed.
5724 * Not NULL Internal packet address.
5726 static struct fcp_ipkt *
5727 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5728 int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5729 uint32_t rscn_count)
5731 int dma_setup = 0;
5732 fc_packet_t *fpkt;
5733 struct fcp_ipkt *icmd = NULL;
5735 icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5736 pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5737 KM_NOSLEEP);
5738 if (icmd == NULL) {
5739 fcp_log(CE_WARN, pptr->port_dip,
5740 "!internal packet allocation failed");
5741 return (NULL);
5745 * initialize the allocated packet
5747 icmd->ipkt_nodma = nodma;
5748 icmd->ipkt_next = icmd->ipkt_prev = NULL;
5749 icmd->ipkt_lun = NULL;
5751 icmd->ipkt_link_cnt = lcount;
5752 icmd->ipkt_change_cnt = tcount;
5753 icmd->ipkt_cause = cause;
5755 mutex_enter(&pptr->port_mutex);
5756 icmd->ipkt_port = pptr;
5757 mutex_exit(&pptr->port_mutex);
5759 /* keep track of amt of data to be sent in pkt */
5760 icmd->ipkt_cmdlen = cmd_len;
5761 icmd->ipkt_resplen = resp_len;
5762 icmd->ipkt_datalen = data_len;
5764 /* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5765 icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5767 /* set pkt's private ptr to point to cmd pkt */
5768 icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5770 /* set FCA private ptr to memory just beyond */
5771 icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5772 ((char *)icmd + sizeof (struct fcp_ipkt) +
5773 pptr->port_dmacookie_sz);
5775 /* get ptr to fpkt substruct and fill it in */
5776 fpkt = icmd->ipkt_fpkt;
5777 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5778 sizeof (struct fcp_ipkt));
5780 if (ptgt != NULL) {
5781 icmd->ipkt_tgt = ptgt;
5782 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5785 fpkt->pkt_comp = fcp_icmd_callback;
5786 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5787 fpkt->pkt_cmdlen = cmd_len;
5788 fpkt->pkt_rsplen = resp_len;
5789 fpkt->pkt_datalen = data_len;
5792 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5793 * rscn_count as fcp knows down to the transport. If a valid count was
5794 * passed into this function, we allocate memory to actually pass down
5795 * this info.
5797 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5798 * basically mean that fcp will not be able to help transport
5799 * distinguish if a new RSCN has come after fcp was last informed about
5800 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5801 * 5068068 where the device might end up going offline in case of RSCN
5802 * storms.
5804 fpkt->pkt_ulp_rscn_infop = NULL;
5805 if (rscn_count != FC_INVALID_RSCN_COUNT) {
5806 fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5807 sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5808 if (fpkt->pkt_ulp_rscn_infop == NULL) {
5809 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5810 fcp_trace, FCP_BUF_LEVEL_6, 0,
5811 "Failed to alloc memory to pass rscn info");
5815 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5816 fc_ulp_rscn_info_t *rscnp;
5818 rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5819 rscnp->ulp_rscn_count = rscn_count;
5822 if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5823 goto fail;
5825 dma_setup++;
5828 * Must hold target mutex across setting of pkt_pd and call to
5829 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5830 * away while we're not looking.
5832 if (ptgt != NULL) {
5833 mutex_enter(&ptgt->tgt_mutex);
5834 fpkt->pkt_pd = ptgt->tgt_pd_handle;
5836 /* ask transport to do its initialization on this pkt */
5837 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5838 != FC_SUCCESS) {
5839 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5840 fcp_trace, FCP_BUF_LEVEL_6, 0,
5841 "fc_ulp_init_packet failed");
5842 mutex_exit(&ptgt->tgt_mutex);
5843 goto fail;
5845 mutex_exit(&ptgt->tgt_mutex);
5846 } else {
5847 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5848 != FC_SUCCESS) {
5849 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5850 fcp_trace, FCP_BUF_LEVEL_6, 0,
5851 "fc_ulp_init_packet failed");
5852 goto fail;
5856 mutex_enter(&pptr->port_mutex);
5857 if (pptr->port_state & (FCP_STATE_DETACHING |
5858 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5859 int rval;
5861 mutex_exit(&pptr->port_mutex);
5863 rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5864 ASSERT(rval == FC_SUCCESS);
5866 goto fail;
5869 if (ptgt != NULL) {
5870 mutex_enter(&ptgt->tgt_mutex);
5871 ptgt->tgt_ipkt_cnt++;
5872 mutex_exit(&ptgt->tgt_mutex);
5875 pptr->port_ipkt_cnt++;
5877 mutex_exit(&pptr->port_mutex);
5879 return (icmd);
5881 fail:
5882 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5883 kmem_free(fpkt->pkt_ulp_rscn_infop,
5884 sizeof (fc_ulp_rscn_info_t));
5885 fpkt->pkt_ulp_rscn_infop = NULL;
5888 if (dma_setup) {
5889 fcp_free_dma(pptr, icmd);
5891 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5892 (size_t)pptr->port_dmacookie_sz);
5894 return (NULL);
5898 * Function: fcp_icmd_free
5900 * Description: Frees the internal command passed by the caller.
5902 * Argument: *pptr Fcp port.
5903 * *icmd Internal packet to free.
5905 * Return Value: None
5907 static void
5908 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5910 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5912 /* Let the underlying layers do their cleanup. */
5913 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5914 icmd->ipkt_fpkt);
5916 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5917 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5918 sizeof (fc_ulp_rscn_info_t));
5921 fcp_free_dma(pptr, icmd);
5923 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5924 (size_t)pptr->port_dmacookie_sz);
5926 mutex_enter(&pptr->port_mutex);
5928 if (ptgt) {
5929 mutex_enter(&ptgt->tgt_mutex);
5930 ptgt->tgt_ipkt_cnt--;
5931 mutex_exit(&ptgt->tgt_mutex);
5934 pptr->port_ipkt_cnt--;
5935 mutex_exit(&pptr->port_mutex);
5939 * Function: fcp_alloc_dma
5941 * Description: Allocates the DMA resources required for the internal
5942 * packet.
5944 * Argument: *pptr FCP port.
5945 * *icmd Internal FCP packet.
5946 * nodma Indicates if the Cmd and Resp will be DMAed.
5947 * flags Allocation flags (Sleep or NoSleep).
5949 * Return Value: FC_SUCCESS
5950 * FC_NOMEM
5952 static int
5953 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5954 int nodma, int flags)
/*
 * Allocates the cmd/resp/data buffers of an internal packet: plain
 * kernel memory when nodma is set, DMA-backed buffers otherwise.  On
 * any failure everything acquired so far is released and FC_NOMEM is
 * returned; FC_SUCCESS otherwise.
 * NOTE(review): blank and brace-only lines appear to have been lost in
 * this extraction; token content below is preserved unmodified.
 */
5956 int rval;
5957 size_t real_size;
5958 uint_t ccount;
5959 int bound = 0;
5960 int cmd_resp = 0;
5961 fc_packet_t *fpkt;
5962 ddi_dma_cookie_t pkt_data_cookie;
5963 ddi_dma_cookie_t *cp;
5964 uint32_t cnt;
5966 fpkt = &icmd->ipkt_fc_packet;
5968 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5969 fpkt->pkt_resp_dma == NULL);
5971 icmd->ipkt_nodma = nodma;
/* nodma: cmd and resp buffers are zeroed kernel memory, no DMA. */
5973 if (nodma) {
5974 fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5975 if (fpkt->pkt_cmd == NULL) {
5976 goto fail;
5979 fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5980 if (fpkt->pkt_resp == NULL) {
5981 goto fail;
5983 } else {
5984 ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5986 rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5987 if (rval == FC_FAILURE) {
5988 ASSERT(fpkt->pkt_cmd_dma == NULL &&
5989 fpkt->pkt_resp_dma == NULL);
5990 goto fail;
5992 cmd_resp++;
/* Data buffer: real DMA binding unless the FCA cannot DMA at all. */
5995 if ((fpkt->pkt_datalen != 0) &&
5996 !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5998 * set up DMA handle and memory for the data in this packet
6000 if (ddi_dma_alloc_handle(pptr->port_dip,
6001 &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6002 NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6003 goto fail;
6006 if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6007 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6008 DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6009 &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6010 goto fail;
6013 /* was DMA mem size gotten < size asked for/needed ?? */
6014 if (real_size < fpkt->pkt_datalen) {
6015 goto fail;
6018 /* bind DMA address and handle together */
6019 if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6020 NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6021 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6022 &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6023 goto fail;
6025 bound++;
/* More cookies than the FCA's S/G list can take: give up. */
6027 if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6028 goto fail;
6031 fpkt->pkt_data_cookie_cnt = ccount;
/* Copy the first cookie, then walk the rest into the pkt array. */
6033 cp = fpkt->pkt_data_cookie;
6034 *cp = pkt_data_cookie;
6035 cp++;
6037 for (cnt = 1; cnt < ccount; cnt++, cp++) {
6038 ddi_dma_nextcookie(fpkt->pkt_data_dma,
6039 &pkt_data_cookie);
6040 *cp = pkt_data_cookie;
6043 } else if (fpkt->pkt_datalen != 0) {
6045 * If it's a pseudo FCA, then it can't support DMA even in
6046 * SCSI data phase.
6048 fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6049 if (fpkt->pkt_data == NULL) {
6050 goto fail;
6055 return (FC_SUCCESS);
/* fail: unwind in reverse order of acquisition. */
6057 fail:
6058 if (bound) {
6059 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6062 if (fpkt->pkt_data_dma) {
6063 if (fpkt->pkt_data) {
6064 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6066 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6067 } else {
6068 if (fpkt->pkt_data) {
6069 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6073 if (nodma) {
6074 if (fpkt->pkt_cmd) {
6075 kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6077 if (fpkt->pkt_resp) {
6078 kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6080 } else {
6081 if (cmd_resp) {
6082 fcp_free_cmd_resp(pptr, fpkt);
6086 return (FC_NOMEM);
6090 static void
6091 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6093 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6095 if (fpkt->pkt_data_dma) {
6096 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6097 if (fpkt->pkt_data) {
6098 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6100 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6101 } else {
6102 if (fpkt->pkt_data) {
6103 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 * Need we reset pkt_* to zero???
6110 if (icmd->ipkt_nodma) {
6111 if (fpkt->pkt_cmd) {
6112 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6114 if (fpkt->pkt_resp) {
6115 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6117 } else {
6118 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6120 fcp_free_cmd_resp(pptr, fpkt);
6125 * Function: fcp_lookup_target
6127 * Description: Finds a target given a WWN.
6129 * Argument: *pptr FCP port.
6130 * *wwn World Wide Name of the device to look for.
6132 * Return Value: NULL No target found
6133 * Not NULL Target structure
6135 * Context: Interrupt context.
6136 * The mutex pptr->port_mutex must be owned.
6138 /* ARGSUSED */
6139 static struct fcp_tgt *
6140 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6142 int hash;
6143 struct fcp_tgt *ptgt;
6145 ASSERT(mutex_owned(&pptr->port_mutex));
6147 hash = FCP_HASH(wwn);
6149 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6150 ptgt = ptgt->tgt_next) {
6151 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6152 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6153 sizeof (ptgt->tgt_port_wwn)) == 0) {
6154 break;
6158 return (ptgt);
6163 * Find target structure given a port identifier
6165 static struct fcp_tgt *
6166 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6168 fc_portid_t port_id;
6169 la_wwn_t pwwn;
6170 struct fcp_tgt *ptgt = NULL;
6172 port_id.priv_lilp_posit = 0;
6173 port_id.port_id = d_id;
6174 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6175 &pwwn) == FC_SUCCESS) {
6176 mutex_enter(&pptr->port_mutex);
6177 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6178 mutex_exit(&pptr->port_mutex);
6181 return (ptgt);
6186 * the packet completion callback routine for info cmd pkts
6188 * this means fpkt points to a response to either a PLOGI or a PRLI
6190 * if there is an error an attempt is made to call a routine to resend
6191 * the command that failed
6193 static void
6194 fcp_icmd_callback(fc_packet_t *fpkt)
/*
 * Completion callback for internal ELS packets (PLOGI / PRLI).
 * On an ACC: a PLOGI completion reuses the same icmd to issue PRLI; a
 * PRLI completion records the target's initiator/target capability
 * bits and kicks off REPORT LUN discovery via fcp_send_scsi().
 * On failure: the ELS is retried (fcp_is_retryable, FCP_MAX_RETRIES),
 * queued, or torn down through the fail: path below.
 * NOTE(review): blank and brace-only lines appear to have been lost in
 * this extraction; token content below is preserved unmodified.
 */
6196 struct fcp_ipkt *icmd;
6197 struct fcp_port *pptr;
6198 struct fcp_tgt *ptgt;
6199 struct la_els_prli *prli;
6200 struct la_els_prli prli_s;
6201 struct fcp_prli *fprli;
6202 struct fcp_lun *plun;
6203 int free_pkt = 1;
6204 int rval;
6205 ls_code_t resp;
6206 uchar_t prli_acc = 0;
6207 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
6208 int lun0_newalloc;
6210 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6212 /* get ptrs to the port and target structs for the cmd */
6213 pptr = icmd->ipkt_port;
6214 ptgt = icmd->ipkt_tgt;
6216 FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
/* For PRLI, also check the echoed command for an ACC ls_code. */
6218 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6219 FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6220 sizeof (prli_s));
6221 prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6224 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6225 fcp_trace, FCP_BUF_LEVEL_2, 0,
6226 "ELS (%x) callback state=0x%x reason=0x%x for %x",
6227 icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6228 ptgt->tgt_d_id);
/* Success path: transport reported FC_PKT_SUCCESS and an ELS ACC. */
6230 if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6231 ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6233 mutex_enter(&ptgt->tgt_mutex);
6234 if (ptgt->tgt_pd_handle == NULL) {
6236 * in a fabric environment the port device handles
6237 * get created only after successful LOGIN into the
6238 * transport, so the transport makes this port
6239 * device (pd) handle available in this packet, so
6240 * save it now
6242 ASSERT(fpkt->pkt_pd != NULL);
6243 ptgt->tgt_pd_handle = fpkt->pkt_pd;
6245 mutex_exit(&ptgt->tgt_mutex);
6247 /* which ELS cmd is this response for ?? */
6248 switch (icmd->ipkt_opcode) {
6249 case LA_ELS_PLOGI:
6250 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6251 fcp_trace, FCP_BUF_LEVEL_5, 0,
6252 "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6253 ptgt->tgt_d_id,
6254 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6255 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6257 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6258 FCP_TGT_TRACE_15);
6260 /* Note that we are not allocating a new icmd */
6261 if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6262 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6263 icmd->ipkt_cause) != DDI_SUCCESS) {
6264 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6265 FCP_TGT_TRACE_16);
6266 goto fail;
6268 break;
6270 case LA_ELS_PRLI:
6271 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6272 fcp_trace, FCP_BUF_LEVEL_5, 0,
6273 "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6275 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6276 FCP_TGT_TRACE_17);
6278 prli = &prli_s;
6280 FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6281 sizeof (prli_s));
6283 fprli = (struct fcp_prli *)prli->service_params;
/* Record the peer's initiator/target function bits. */
6285 mutex_enter(&ptgt->tgt_mutex);
6286 ptgt->tgt_icap = fprli->initiator_fn;
6287 ptgt->tgt_tcap = fprli->target_fn;
6288 mutex_exit(&ptgt->tgt_mutex);
6290 if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6292 * this FCP device does not support target mode
6294 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6295 FCP_TGT_TRACE_18);
6296 goto fail;
6298 if (fprli->retry == 1) {
6299 fc_ulp_disable_relogin(pptr->port_fp_handle,
6300 &ptgt->tgt_port_wwn);
6303 /* target is no longer offline */
6304 mutex_enter(&pptr->port_mutex);
6305 mutex_enter(&ptgt->tgt_mutex);
6306 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6307 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6308 FCP_TGT_MARK);
6309 } else {
6310 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6311 fcp_trace, FCP_BUF_LEVEL_2, 0,
6312 "fcp_icmd_callback,1: state change "
6313 " occured for D_ID=0x%x", ptgt->tgt_d_id);
6314 mutex_exit(&ptgt->tgt_mutex);
6315 mutex_exit(&pptr->port_mutex);
6316 goto fail;
6318 mutex_exit(&ptgt->tgt_mutex);
6319 mutex_exit(&pptr->port_mutex);
6322 * lun 0 should always respond to inquiry, so
6323 * get the LUN struct for LUN 0
6325 * Currently we deal with first level of addressing.
6326 * If / when we start supporting 0x device types
6327 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6328 * this logic will need revisiting.
6330 lun0_newalloc = 0;
6331 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6333 * no LUN struct for LUN 0 yet exists,
6334 * so create one
6336 plun = fcp_alloc_lun(ptgt);
6337 if (plun == NULL) {
6338 fcp_log(CE_WARN, pptr->port_dip,
6339 "!Failed to allocate lun 0 for"
6340 " D_ID=%x", ptgt->tgt_d_id);
6341 goto fail;
6343 lun0_newalloc = 1;
6346 /* fill in LUN info */
6347 mutex_enter(&ptgt->tgt_mutex);
6349 * consider lun 0 as device not connected if it is
6350 * offlined or newly allocated
6352 if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6353 lun0_newalloc) {
6354 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6356 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6357 plun->lun_state &= ~FCP_LUN_OFFLINE;
6358 ptgt->tgt_lun_cnt = 1;
6359 ptgt->tgt_report_lun_cnt = 0;
6360 mutex_exit(&ptgt->tgt_mutex);
6362 /* Retrieve the rscn count (if a valid one exists) */
6363 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6364 rscn_count = ((fc_ulp_rscn_info_t *)
6365 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6366 ->ulp_rscn_count;
6367 } else {
6368 rscn_count = FC_INVALID_RSCN_COUNT;
6371 /* send Report Lun request to target */
6372 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6373 sizeof (struct fcp_reportlun_resp),
6374 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6375 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6376 mutex_enter(&pptr->port_mutex);
6377 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6378 fcp_log(CE_WARN, pptr->port_dip,
6379 "!Failed to send REPORT LUN to"
6380 " D_ID=%x", ptgt->tgt_d_id);
6381 } else {
6382 FCP_TRACE(fcp_logq,
6383 pptr->port_instbuf, fcp_trace,
6384 FCP_BUF_LEVEL_5, 0,
6385 "fcp_icmd_callback,2:state change"
6386 " occured for D_ID=0x%x",
6387 ptgt->tgt_d_id);
6389 mutex_exit(&pptr->port_mutex);
6391 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6392 FCP_TGT_TRACE_19);
6394 goto fail;
6395 } else {
/* REPORT LUN owns discovery now; this icmd is done. */
6396 free_pkt = 0;
6397 fcp_icmd_free(pptr, icmd);
6399 break;
6401 default:
6402 fcp_log(CE_WARN, pptr->port_dip,
6403 "!fcp_icmd_callback Invalid opcode");
6404 goto fail;
6407 return;
/*
 * Failure path: retry non-PLOGI ELS commands below; PLOGI retries
 * are left to the transport (see comment in the original source).
 */
6412 * Other PLOGI failures are not retried as the
6413 * transport does it already
6415 if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6416 if (fcp_is_retryable(icmd) &&
6417 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6419 if (FCP_MUST_RETRY(fpkt)) {
6420 fcp_queue_ipkt(pptr, fpkt);
6421 return;
6424 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6425 fcp_trace, FCP_BUF_LEVEL_2, 0,
6426 "ELS PRLI is retried for d_id=0x%x, state=%x,"
6427 " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6428 fpkt->pkt_reason);
6431 * Retry by recalling the routine that
6432 * originally queued this packet
6434 mutex_enter(&pptr->port_mutex);
6435 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6436 caddr_t msg;
6438 mutex_exit(&pptr->port_mutex);
6440 ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6442 if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6443 fpkt->pkt_timeout +=
6444 FCP_TIMEOUT_DELTA;
6447 rval = fc_ulp_issue_els(pptr->port_fp_handle,
6448 fpkt);
6449 if (rval == FC_SUCCESS) {
6450 return;
6453 if (rval == FC_STATEC_BUSY ||
6454 rval == FC_OFFLINE) {
6455 fcp_queue_ipkt(pptr, fpkt);
6456 return;
6458 (void) fc_ulp_error(rval, &msg);
6460 fcp_log(CE_NOTE, pptr->port_dip,
6461 "!ELS 0x%x failed to d_id=0x%x;"
6462 " %s", icmd->ipkt_opcode,
6463 ptgt->tgt_d_id, msg);
6464 } else {
6465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6466 fcp_trace, FCP_BUF_LEVEL_2, 0,
6467 "fcp_icmd_callback,3: state change "
6468 " occured for D_ID=0x%x", ptgt->tgt_d_id);
6469 mutex_exit(&pptr->port_mutex);
6472 } else {
6473 if (fcp_is_retryable(icmd) &&
6474 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6475 if (FCP_MUST_RETRY(fpkt)) {
6476 fcp_queue_ipkt(pptr, fpkt);
6477 return;
6480 mutex_enter(&pptr->port_mutex);
6481 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6482 fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6483 mutex_exit(&pptr->port_mutex);
6484 fcp_print_error(fpkt);
6485 } else {
6486 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6487 fcp_trace, FCP_BUF_LEVEL_2, 0,
6488 "fcp_icmd_callback,4: state change occured"
6489 " for D_ID=0x%x", ptgt->tgt_d_id);
6490 mutex_exit(&pptr->port_mutex);
/* Common teardown: finish-init accounting + free, unless reused above. */
6494 fail:
6495 if (free_pkt) {
6496 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6497 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6498 fcp_icmd_free(pptr, icmd);
6504 * called internally to send an info cmd using the transport
6506 * sends either an INQ or a REPORT_LUN
6508 * when the packet is completed fcp_scsi_callback is called
6510 static int
6511 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6512 int lcount, int tcount, int cause, uint32_t rscn_count)
/*
 * Builds and issues an internal SCSI command (INQUIRY, INQUIRY VPD
 * page 0x83, or REPORT LUN) to a LUN; the completion routine is
 * fcp_scsi_callback().  Returns DDI_SUCCESS once the packet has been
 * handed to the transport, DDI_FAILURE on allocation failure, state
 * change, or transport error.
 * NOTE(review): blank and brace-only lines appear to have been lost in
 * this extraction; token content below is preserved unmodified.
 */
6514 int nodma;
6515 struct fcp_ipkt *icmd;
6516 struct fcp_tgt *ptgt;
6517 struct fcp_port *pptr;
6518 fc_frame_hdr_t *hp;
6519 fc_packet_t *fpkt;
6520 struct fcp_cmd fcp_cmd;
6521 struct fcp_cmd *fcmd;
6522 union scsi_cdb *scsi_cdb;
6524 ASSERT(plun != NULL);
6526 ptgt = plun->lun_tgt;
6527 ASSERT(ptgt != NULL);
6529 pptr = ptgt->tgt_port;
6530 ASSERT(pptr != NULL);
6532 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6533 fcp_trace, FCP_BUF_LEVEL_5, 0,
6534 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6536 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6537 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6538 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6539 rscn_count);
6541 if (icmd == NULL) {
6542 return (DDI_FAILURE);
6545 fpkt = icmd->ipkt_fpkt;
6546 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6547 icmd->ipkt_retries = 0;
6548 icmd->ipkt_opcode = opcode;
6549 icmd->ipkt_lun = plun;
/* nodma: build the command in place; else build locally and copy out. */
6551 if (nodma) {
6552 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6553 } else {
6554 fcmd = &fcp_cmd;
6556 bzero(fcmd, sizeof (struct fcp_cmd));
6558 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
/* Fill in the FC frame header for a class-3 FCP command. */
6560 hp = &fpkt->pkt_cmd_fhdr;
6562 hp->s_id = pptr->port_id;
6563 hp->d_id = ptgt->tgt_d_id;
6564 hp->r_ctl = R_CTL_COMMAND;
6565 hp->type = FC_TYPE_SCSI_FCP;
6566 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6567 hp->rsvd = 0;
6568 hp->seq_id = 0;
6569 hp->seq_cnt = 0;
6570 hp->ox_id = 0xffff;
6571 hp->rx_id = 0xffff;
6572 hp->ro = 0;
6574 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 * Request SCSI target for expedited processing
6581 * Set up for untagged queuing because we do not
6582 * know if the fibre device supports queuing.
6584 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6585 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6586 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6587 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6588 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6589 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6590 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
/* Build the CDB for the requested opcode. */
6592 switch (opcode) {
6593 case SCMD_INQUIRY_PAGE83:
6595 * Prepare to get the Inquiry VPD page 83 information
6597 fcmd->fcp_cntl.cntl_read_data = 1;
6598 fcmd->fcp_cntl.cntl_write_data = 0;
6599 fcmd->fcp_data_len = alloc_len;
6601 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6602 fpkt->pkt_comp = fcp_scsi_callback;
6604 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6605 scsi_cdb->g0_addr2 = 0x01;
6606 scsi_cdb->g0_addr1 = 0x83;
6607 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6608 break;
6610 case SCMD_INQUIRY:
6611 fcmd->fcp_cntl.cntl_read_data = 1;
6612 fcmd->fcp_cntl.cntl_write_data = 0;
6613 fcmd->fcp_data_len = alloc_len;
6615 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6616 fpkt->pkt_comp = fcp_scsi_callback;
6618 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6619 scsi_cdb->g0_count0 = SUN_INQSIZE;
6620 break;
6622 case SCMD_REPORT_LUN: {
6623 fc_portid_t d_id;
6624 opaque_t fca_dev;
6626 ASSERT(alloc_len >= 16);
6628 d_id.priv_lilp_posit = 0;
6629 d_id.port_id = ptgt->tgt_d_id;
6631 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6633 mutex_enter(&ptgt->tgt_mutex);
6634 ptgt->tgt_fca_dev = fca_dev;
6635 mutex_exit(&ptgt->tgt_mutex);
6637 fcmd->fcp_cntl.cntl_read_data = 1;
6638 fcmd->fcp_cntl.cntl_write_data = 0;
6639 fcmd->fcp_data_len = alloc_len;
6641 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6642 fpkt->pkt_comp = fcp_scsi_callback;
6644 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
/* REPORT LUNS carries a 32-bit allocation length, little end first here. */
6645 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6646 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6647 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6648 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6649 break;
6652 default:
6653 fcp_log(CE_WARN, pptr->port_dip,
6654 "!fcp_send_scsi Invalid opcode");
6655 break;
6658 if (!nodma) {
6659 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6660 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
/* Bail out if the link/target state changed since the caller sampled it. */
6663 mutex_enter(&pptr->port_mutex);
6664 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6666 mutex_exit(&pptr->port_mutex);
6667 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6668 FC_SUCCESS) {
6669 fcp_icmd_free(pptr, icmd);
6670 return (DDI_FAILURE);
6672 return (DDI_SUCCESS);
6673 } else {
6674 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6675 fcp_trace, FCP_BUF_LEVEL_2, 0,
6676 "fcp_send_scsi,1: state change occured"
6677 " for D_ID=0x%x", ptgt->tgt_d_id);
6678 mutex_exit(&pptr->port_mutex);
6679 fcp_icmd_free(pptr, icmd);
6680 return (DDI_FAILURE);
6686 * called by fcp_scsi_callback to check to handle the case where
6687 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6689 static int
6690 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
/*
 * Examines a failed REPORT LUN response for known quirky devices.
 * For reservation conflict, ILLEGAL REQUEST/0x20 and NOT READY/0x04
 * the response is rewritten in place to STATUS_GOOD and the data
 * buffer is filled with fcp_dummy_lun (a single-LUN answer); for
 * "report luns changed" / "LU not supported" sense a LUN
 * reconfiguration is scheduled.  Returns DDI_SUCCESS when the case
 * was handled here, DDI_FAILURE otherwise.
 * NOTE(review): blank and brace-only lines appear to have been lost in
 * this extraction; token content below is preserved unmodified.
 */
6692 uchar_t rqlen;
6693 int rval = DDI_FAILURE;
6694 struct scsi_extended_sense sense_info, *sense;
6695 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6696 fpkt->pkt_ulp_private;
6697 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6698 struct fcp_port *pptr = ptgt->tgt_port;
6700 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6702 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6704 * SCSI-II Reserve Release support. Some older FC drives return
6705 * Reservation conflict for Report Luns command.
6707 if (icmd->ipkt_nodma) {
6708 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6709 rsp->fcp_u.fcp_status.sense_len_set = 0;
6710 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6711 } else {
6712 fcp_rsp_t new_resp;
6714 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6715 fpkt->pkt_resp_acc, sizeof (new_resp));
6717 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6718 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6719 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6722 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6726 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6728 return (DDI_SUCCESS);
/* Beyond this point we need usable sense data to decide anything. */
6731 sense = &sense_info;
6732 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6733 /* no need to continue if sense length is not set */
6734 return (rval);
6737 /* casting 64-bit integer to 8-bit */
6738 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6739 sizeof (struct scsi_extended_sense));
6741 if (rqlen < 14) {
6742 /* no need to continue if request length isn't long enough */
6743 return (rval);
6746 if (icmd->ipkt_nodma) {
6748 * We can safely use fcp_response_len here since the
6749 * only path that calls fcp_check_reportlun,
6750 * fcp_scsi_callback, has already called
6751 * fcp_validate_fcp_response.
6753 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6754 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6755 } else {
6756 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6757 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6758 sizeof (struct scsi_extended_sense));
6761 if (!FCP_SENSE_NO_LUN(sense)) {
6762 mutex_enter(&ptgt->tgt_mutex);
6763 /* clear the flag if any */
6764 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6765 mutex_exit(&ptgt->tgt_mutex);
/* ILLEGAL REQUEST / ASC 0x20: device doesn't support REPORT LUN. */
6768 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6769 (sense->es_add_code == 0x20)) {
6770 if (icmd->ipkt_nodma) {
6771 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6772 rsp->fcp_u.fcp_status.sense_len_set = 0;
6773 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6774 } else {
6775 fcp_rsp_t new_resp;
6777 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6778 fpkt->pkt_resp_acc, sizeof (new_resp));
6780 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6781 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6782 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6784 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6785 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6789 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6791 return (DDI_SUCCESS);
6795 * This is for the STK library which returns a check condition,
6796 * to indicate device is not ready, manual assistance needed.
6797 * This is to a report lun command when the door is open.
6799 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6800 if (icmd->ipkt_nodma) {
6801 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6802 rsp->fcp_u.fcp_status.sense_len_set = 0;
6803 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6804 } else {
6805 fcp_rsp_t new_resp;
6807 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6808 fpkt->pkt_resp_acc, sizeof (new_resp));
6810 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6811 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6812 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6814 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6815 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6819 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6821 return (DDI_SUCCESS);
/* Sense indicates the LUN inventory changed: schedule rediscovery. */
6824 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6825 (FCP_SENSE_NO_LUN(sense))) {
6826 mutex_enter(&ptgt->tgt_mutex);
6827 if ((FCP_SENSE_NO_LUN(sense)) &&
6828 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6829 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6830 mutex_exit(&ptgt->tgt_mutex);
6832 * reconfig was triggred by ILLEGAL REQUEST but
6833 * got ILLEGAL REQUEST again
6835 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6836 fcp_trace, FCP_BUF_LEVEL_3, 0,
6837 "!FCP: Unable to obtain Report Lun data"
6838 " target=%x", ptgt->tgt_d_id);
6839 } else {
6840 if (ptgt->tgt_tid == NULL) {
6841 timeout_id_t tid;
6843 * REPORT LUN data has changed. Kick off
6844 * rediscovery
6846 tid = timeout(fcp_reconfigure_luns,
6847 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6849 ptgt->tgt_tid = tid;
6850 ptgt->tgt_state |= FCP_TGT_BUSY;
6852 if (FCP_SENSE_NO_LUN(sense)) {
6853 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6855 mutex_exit(&ptgt->tgt_mutex);
6856 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6857 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6858 fcp_trace, FCP_BUF_LEVEL_3, 0,
6859 "!FCP:Report Lun Has Changed"
6860 " target=%x", ptgt->tgt_d_id);
6861 } else if (FCP_SENSE_NO_LUN(sense)) {
6862 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6863 fcp_trace, FCP_BUF_LEVEL_3, 0,
6864 "!FCP:LU Not Supported"
6865 " target=%x", ptgt->tgt_d_id);
6868 rval = DDI_SUCCESS;
6871 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6872 fcp_trace, FCP_BUF_LEVEL_5, 0,
6873 "D_ID=%x, sense=%x, status=%x",
6874 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6875 rsp->fcp_u.fcp_status.scsi_status);
6877 return (rval);
6881 * Function: fcp_scsi_callback
6883 * Description: This is the callback routine set by fcp_send_scsi() after
6884 * it calls fcp_icmd_alloc(). The SCSI command completed here
6885 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6886 * INQUIRY_PAGE83.
6888 * Argument: *fpkt FC packet used to convey the command
6890 * Return Value: None
6892 static void
6893 fcp_scsi_callback(fc_packet_t *fpkt)
6895 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6896 fpkt->pkt_ulp_private;
6897 struct fcp_rsp_info fcp_rsp_err, *bep;
6898 struct fcp_port *pptr;
6899 struct fcp_tgt *ptgt;
6900 struct fcp_lun *plun;
6901 struct fcp_rsp response, *rsp;
6903 ptgt = icmd->ipkt_tgt;
6904 pptr = ptgt->tgt_port;
6905 plun = icmd->ipkt_lun;
6907 if (icmd->ipkt_nodma) {
6908 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6909 } else {
6910 rsp = &response;
6911 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6912 sizeof (struct fcp_rsp));
6915 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6916 fcp_trace, FCP_BUF_LEVEL_2, 0,
6917 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6918 "status=%x, lun num=%x",
6919 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6920 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 * Pre-init LUN GUID with NWWN if it is not a device that
6924 * supports multiple luns and we know it's not page83
6925 * compliant. Although using a NWWN is not lun unique,
6926 * we will be fine since there is only one lun behind the taget
6927 * in this case.
6929 if ((plun->lun_guid_size == 0) &&
6930 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6931 (fcp_symmetric_device_probe(plun) == 0)) {
6933 char ascii_wwn[FC_WWN_SIZE*2+1];
6934 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6935 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6939 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6940 * when thay have more data than what is asked in CDB. An overrun
6941 * is really when FCP_DL is smaller than the data length in CDB.
6942 * In the case here we know that REPORT LUN command we formed within
6943 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6944 * behavior. In reality this is FC_SUCCESS.
6946 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6947 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6948 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6949 fpkt->pkt_state = FC_PKT_SUCCESS;
6952 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6953 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6954 fcp_trace, FCP_BUF_LEVEL_2, 0,
6955 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6956 ptgt->tgt_d_id);
6958 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6960 * Inquiry VPD page command on A5K SES devices would
6961 * result in data CRC errors.
6963 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6964 (void) fcp_handle_page83(fpkt, icmd, 1);
6965 return;
6968 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6969 FCP_MUST_RETRY(fpkt)) {
6970 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6971 fcp_retry_scsi_cmd(fpkt);
6972 return;
6975 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6976 FCP_TGT_TRACE_20);
6978 mutex_enter(&pptr->port_mutex);
6979 mutex_enter(&ptgt->tgt_mutex);
6980 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6981 mutex_exit(&ptgt->tgt_mutex);
6982 mutex_exit(&pptr->port_mutex);
6983 fcp_print_error(fpkt);
6984 } else {
6985 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6986 fcp_trace, FCP_BUF_LEVEL_2, 0,
6987 "fcp_scsi_callback,1: state change occured"
6988 " for D_ID=0x%x", ptgt->tgt_d_id);
6989 mutex_exit(&ptgt->tgt_mutex);
6990 mutex_exit(&pptr->port_mutex);
6992 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6993 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6994 fcp_icmd_free(pptr, icmd);
6995 return;
6998 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7000 mutex_enter(&pptr->port_mutex);
7001 mutex_enter(&ptgt->tgt_mutex);
7002 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7003 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7004 fcp_trace, FCP_BUF_LEVEL_2, 0,
7005 "fcp_scsi_callback,2: state change occured"
7006 " for D_ID=0x%x", ptgt->tgt_d_id);
7007 mutex_exit(&ptgt->tgt_mutex);
7008 mutex_exit(&pptr->port_mutex);
7009 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7010 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7011 fcp_icmd_free(pptr, icmd);
7012 return;
7014 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7016 mutex_exit(&ptgt->tgt_mutex);
7017 mutex_exit(&pptr->port_mutex);
7019 if (icmd->ipkt_nodma) {
7020 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7021 sizeof (struct fcp_rsp));
7022 } else {
7023 bep = &fcp_rsp_err;
7024 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7025 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7029 fcp_retry_scsi_cmd(fpkt);
7030 return;
7033 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7034 FCP_NO_FAILURE) {
7035 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7036 fcp_trace, FCP_BUF_LEVEL_2, 0,
7037 "rsp_code=0x%x, rsp_len_set=0x%x",
7038 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7039 fcp_retry_scsi_cmd(fpkt);
7040 return;
7043 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7044 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7045 fcp_queue_ipkt(pptr, fpkt);
7046 return;
7050 * Devices that do not support INQUIRY_PAGE83, return check condition
7051 * with illegal request as per SCSI spec.
7052 * Crossbridge is one such device and Daktari's SES node is another.
7053 * We want to ideally enumerate these devices as a non-mpxio devices.
7054 * SES nodes (Daktari only currently) are an exception to this.
7056 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7057 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7059 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7060 fcp_trace, FCP_BUF_LEVEL_3, 0,
7061 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7062 "check condition. May enumerate as non-mpxio device",
7063 ptgt->tgt_d_id, plun->lun_type);
7066 * If we let Daktari's SES be enumerated as a non-mpxio
7067 * device, there will be a discrepency in that the other
7068 * internal FC disks will get enumerated as mpxio devices.
7069 * Applications like luxadm expect this to be consistent.
7071 * So, we put in a hack here to check if this is an SES device
7072 * and handle it here.
7074 if (plun->lun_type == DTYPE_ESI) {
7076 * Since, pkt_state is actually FC_PKT_SUCCESS
7077 * at this stage, we fake a failure here so that
7078 * fcp_handle_page83 will create a device path using
7079 * the WWN instead of the GUID which is not there anyway
7081 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7082 (void) fcp_handle_page83(fpkt, icmd, 1);
7083 return;
7086 mutex_enter(&ptgt->tgt_mutex);
7087 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7088 FCP_LUN_MARK | FCP_LUN_BUSY);
7089 mutex_exit(&ptgt->tgt_mutex);
7091 (void) fcp_call_finish_init(pptr, ptgt,
7092 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7093 icmd->ipkt_cause);
7094 fcp_icmd_free(pptr, icmd);
7095 return;
7098 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7099 int rval = DDI_FAILURE;
7102 * handle cases where report lun isn't supported
7103 * by faking up our own REPORT_LUN response or
7104 * UNIT ATTENTION
7106 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7107 rval = fcp_check_reportlun(rsp, fpkt);
7110 * fcp_check_reportlun might have modified the
7111 * FCP response. Copy it in again to get an updated
7112 * FCP response
7114 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7115 rsp = &response;
7117 FCP_CP_IN(fpkt->pkt_resp, rsp,
7118 fpkt->pkt_resp_acc,
7119 sizeof (struct fcp_rsp));
7123 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7124 if (rval == DDI_SUCCESS) {
7125 (void) fcp_call_finish_init(pptr, ptgt,
7126 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7127 icmd->ipkt_cause);
7128 fcp_icmd_free(pptr, icmd);
7129 } else {
7130 fcp_retry_scsi_cmd(fpkt);
7133 return;
7135 } else {
7136 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7137 mutex_enter(&ptgt->tgt_mutex);
7138 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7139 mutex_exit(&ptgt->tgt_mutex);
7143 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7144 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7145 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7146 DDI_DMA_SYNC_FORCPU);
7149 switch (icmd->ipkt_opcode) {
7150 case SCMD_INQUIRY:
7151 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7152 fcp_handle_inquiry(fpkt, icmd);
7153 break;
7155 case SCMD_REPORT_LUN:
7156 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7157 FCP_TGT_TRACE_22);
7158 fcp_handle_reportlun(fpkt, icmd);
7159 break;
7161 case SCMD_INQUIRY_PAGE83:
7162 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7163 (void) fcp_handle_page83(fpkt, icmd, 0);
7164 break;
7166 default:
7167 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7168 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7169 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7170 fcp_icmd_free(pptr, icmd);
7171 break;
7176 static void
7177 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7179 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7180 fpkt->pkt_ulp_private;
7181 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7182 struct fcp_port *pptr = ptgt->tgt_port;
7184 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7185 fcp_is_retryable(icmd)) {
7186 mutex_enter(&pptr->port_mutex);
7187 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7188 mutex_exit(&pptr->port_mutex);
7189 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7190 fcp_trace, FCP_BUF_LEVEL_3, 0,
7191 "Retrying %s to %x; state=%x, reason=%x",
7192 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7193 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7194 fpkt->pkt_state, fpkt->pkt_reason);
7196 fcp_queue_ipkt(pptr, fpkt);
7197 } else {
7198 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7199 fcp_trace, FCP_BUF_LEVEL_3, 0,
7200 "fcp_retry_scsi_cmd,1: state change occured"
7201 " for D_ID=0x%x", ptgt->tgt_d_id);
7202 mutex_exit(&pptr->port_mutex);
7203 (void) fcp_call_finish_init(pptr, ptgt,
7204 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7205 icmd->ipkt_cause);
7206 fcp_icmd_free(pptr, icmd);
7208 } else {
7209 fcp_print_error(fpkt);
7210 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7211 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7212 fcp_icmd_free(pptr, icmd);
7217 * Function: fcp_handle_page83
7219 * Description: Treats the response to INQUIRY_PAGE83.
7221 * Argument: *fpkt FC packet used to convey the command.
7222 * *icmd Original fcp_ipkt structure.
7223 * ignore_page83_data
7224 * if it's 1, that means it's a special devices's
7225 * page83 response, it should be enumerated under mpxio
7227 * Return Value: None
7229 static void
7230 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7231 int ignore_page83_data)
7233 struct fcp_port *pptr;
7234 struct fcp_lun *plun;
7235 struct fcp_tgt *ptgt;
7236 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7237 int fail = 0;
7238 ddi_devid_t devid;
7239 char *guid = NULL;
7240 int ret;
7242 ASSERT(icmd != NULL && fpkt != NULL);
7244 pptr = icmd->ipkt_port;
7245 ptgt = icmd->ipkt_tgt;
7246 plun = icmd->ipkt_lun;
7248 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7249 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7251 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7252 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7254 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7255 fcp_trace, FCP_BUF_LEVEL_5, 0,
7256 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7257 "dtype=0x%x, lun num=%x",
7258 pptr->port_instance, ptgt->tgt_d_id,
7259 dev_id_page[0], plun->lun_num);
7261 ret = ddi_devid_scsi_encode(
7262 DEVID_SCSI_ENCODE_VERSION_LATEST,
7263 NULL, /* driver name */
7264 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7265 sizeof (plun->lun_inq), /* size of standard inquiry */
7266 NULL, /* page 80 data */
7267 0, /* page 80 len */
7268 dev_id_page, /* page 83 data */
7269 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7270 &devid);
7272 if (ret == DDI_SUCCESS) {
7274 guid = ddi_devid_to_guid(devid);
7276 if (guid) {
7278 * Check our current guid. If it's non null
7279 * and it has changed, we need to copy it into
7280 * lun_old_guid since we might still need it.
7282 if (plun->lun_guid &&
7283 strcmp(guid, plun->lun_guid)) {
7284 unsigned int len;
7287 * If the guid of the LUN changes,
7288 * reconfiguration should be triggered
7289 * to reflect the changes.
7290 * i.e. we should offline the LUN with
7291 * the old guid, and online the LUN with
7292 * the new guid.
7294 plun->lun_state |= FCP_LUN_CHANGED;
7296 if (plun->lun_old_guid) {
7297 kmem_free(plun->lun_old_guid,
7298 plun->lun_old_guid_size);
7301 len = plun->lun_guid_size;
7302 plun->lun_old_guid_size = len;
7304 plun->lun_old_guid = kmem_zalloc(len,
7305 KM_NOSLEEP);
7307 if (plun->lun_old_guid) {
7309 * The alloc was successful then
7310 * let's do the copy.
7312 bcopy(plun->lun_guid,
7313 plun->lun_old_guid, len);
7314 } else {
7315 fail = 1;
7316 plun->lun_old_guid_size = 0;
7319 if (!fail) {
7320 if (fcp_copy_guid_2_lun_block(
7321 plun, guid)) {
7322 fail = 1;
7325 ddi_devid_free_guid(guid);
7327 } else {
7328 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7329 fcp_trace, FCP_BUF_LEVEL_2, 0,
7330 "fcp_handle_page83: unable to create "
7331 "GUID");
7333 /* couldn't create good guid from devid */
7334 fail = 1;
7336 ddi_devid_free(devid);
7338 } else if (ret == DDI_NOT_WELL_FORMED) {
7339 /* NULL filled data for page 83 */
7340 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7341 fcp_trace, FCP_BUF_LEVEL_2, 0,
7342 "fcp_handle_page83: retry GUID");
7344 icmd->ipkt_retries = 0;
7345 fcp_retry_scsi_cmd(fpkt);
7346 return;
7347 } else {
7348 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7349 fcp_trace, FCP_BUF_LEVEL_2, 0,
7350 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7351 ret);
7353 * Since the page83 validation
7354 * introduced late, we are being
7355 * tolerant to the existing devices
7356 * that already found to be working
7357 * under mpxio, like A5200's SES device,
7358 * its page83 response will not be standard-compliant,
7359 * but we still want it to be enumerated under mpxio.
7361 if (fcp_symmetric_device_probe(plun) != 0) {
7362 fail = 1;
7366 } else {
7367 /* bad packet state */
7368 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 * For some special devices (A5K SES and Daktari's SES devices),
7372 * they should be enumerated under mpxio
7373 * or "luxadm dis" will fail
7375 if (ignore_page83_data) {
7376 fail = 0;
7377 } else {
7378 fail = 1;
7380 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7381 fcp_trace, FCP_BUF_LEVEL_2, 0,
7382 "!Devid page cmd failed. "
7383 "fpkt_state: %x fpkt_reason: %x",
7384 "ignore_page83: %d",
7385 fpkt->pkt_state, fpkt->pkt_reason,
7386 ignore_page83_data);
7389 mutex_enter(&pptr->port_mutex);
7390 mutex_enter(&plun->lun_mutex);
7392 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7393 * mismatch between lun_cip and lun_mpxio.
7395 if (plun->lun_cip == NULL) {
7397 * If we don't have a guid for this lun it's because we were
7398 * unable to glean one from the page 83 response. Set the
7399 * control flag to 0 here to make sure that we don't attempt to
7400 * enumerate it under mpxio.
7402 if (fail || pptr->port_mpxio == 0) {
7403 plun->lun_mpxio = 0;
7404 } else {
7405 plun->lun_mpxio = 1;
7408 mutex_exit(&plun->lun_mutex);
7409 mutex_exit(&pptr->port_mutex);
7411 mutex_enter(&ptgt->tgt_mutex);
7412 plun->lun_state &=
7413 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7414 mutex_exit(&ptgt->tgt_mutex);
7416 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7417 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7419 fcp_icmd_free(pptr, icmd);
7423  * Function: fcp_handle_inquiry
7425  * Description: Called by fcp_scsi_callback to handle the response to an
7426  * INQUIRY request.
7428  * Argument: *fpkt FC packet used to convey the command.
7429  * *icmd Original fcp_ipkt structure.
7431  * Return Value: None
7433 static void
7434 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7436 struct fcp_port *pptr;
7437 struct fcp_lun *plun;
7438 struct fcp_tgt *ptgt;
7439 uchar_t dtype;
7440 uchar_t pqual;
7441 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7443 ASSERT(icmd != NULL && fpkt != NULL);
7445 pptr = icmd->ipkt_port;
7446 ptgt = icmd->ipkt_tgt;
7447 plun = icmd->ipkt_lun;
	/* Copy the standard INQUIRY data out of the packet's data buffer. */
7449 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7450 sizeof (struct scsi_inquiry));
	/*
	 * inq_dtype byte: low 5 bits are the peripheral device type,
	 * top 3 bits are the peripheral qualifier.
	 */
7452 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7453 pqual = plun->lun_inq.inq_dtype >> 5;
7455 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7456 fcp_trace, FCP_BUF_LEVEL_5, 0,
7457 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7458 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7459 plun->lun_num, dtype, pqual);
7461 if (pqual != 0) {
7463 * Non-zero peripheral qualifier
	/* Nonzero qualifier: LUN is not a usable device; stop discovery here. */
7465 fcp_log(CE_CONT, pptr->port_dip,
7466 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7467 "Device type=0x%x Peripheral qual=0x%x\n",
7468 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7470 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7471 fcp_trace, FCP_BUF_LEVEL_5, 0,
7472 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7473 "Device type=0x%x Peripheral qual=0x%x\n",
7474 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7476 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7478 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7479 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7480 fcp_icmd_free(pptr, icmd);
7481 return;
7485 * If the device is already initialized, check the dtype
7486 * for a change. If it has changed then update the flags
7487 * so the create_luns will offline the old device and
7488 * create the new device. Refer to bug: 4764752
7490 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7491 plun->lun_state |= FCP_LUN_CHANGED;
7493 plun->lun_type = plun->lun_inq.inq_dtype;
7496 * This code is setting/initializing the throttling in the FCA
7497 * driver.
7499 mutex_enter(&pptr->port_mutex);
7500 if (!pptr->port_notify) {
	/*
	 * `pid` is presumably a file-scope product-id string defined
	 * elsewhere in this file, matched against the INQUIRY product id
	 * — verify against the definition site.
	 */
7501 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7502 uint32_t cmd = 0;
	/*
	 * NOTE(review): due to precedence, `0xFFFFFF00 >> 8` shifts the
	 * constant rather than `(cmd & 0xFFFFFF00)`.  Harmless here since
	 * cmd is 0 (result is FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8)),
	 * but confirm intent before reusing this expression.
	 */
7503 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7504 ((cmd & 0xFFFFFF00 >> 8) |
7505 FCP_SVE_THROTTLE << 8));
7506 pptr->port_notify = 1;
	/* Drop port_mutex across the ULP notify call, then reacquire. */
7507 mutex_exit(&pptr->port_mutex);
7508 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7509 mutex_enter(&pptr->port_mutex);
	/* A link reset/target change invalidates this command: bail out. */
7513 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7514 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7515 fcp_trace, FCP_BUF_LEVEL_2, 0,
7516 "fcp_handle_inquiry,1:state change occured"
7517 " for D_ID=0x%x", ptgt->tgt_d_id);
7518 mutex_exit(&pptr->port_mutex);
7520 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7521 (void) fcp_call_finish_init(pptr, ptgt,
7522 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7523 icmd->ipkt_cause);
7524 fcp_icmd_free(pptr, icmd);
7525 return;
7527 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7528 mutex_exit(&pptr->port_mutex);
7530 /* Retrieve the rscn count (if a valid one exists) */
7531 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7532 rscn_count = ((fc_ulp_rscn_info_t *)
7533 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7534 } else {
7535 rscn_count = FC_INVALID_RSCN_COUNT;
	/* Next discovery step: send INQUIRY VPD page 0x83 for this LUN. */
7538 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7539 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7540 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7541 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7542 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7543 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7544 (void) fcp_call_finish_init(pptr, ptgt,
7545 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7546 icmd->ipkt_cause);
7550 * Read Inquiry VPD Page 0x83 to uniquely
7551 * identify this logical unit.
7553 fcp_icmd_free(pptr, icmd);
7557  * Function: fcp_handle_reportlun
7559  * Description: Called by fcp_scsi_callback to handle the response to a
7560  * REPORT_LUN request.
7562  * Argument: *fpkt FC packet used to convey the command.
7563  * *icmd Original fcp_ipkt structure.
7565  * Return Value: None
7567 static void
7568 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7570 int i;
7571 int nluns_claimed;
7572 int nluns_bufmax;
7573 int len;
7574 uint16_t lun_num;
7575 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7576 struct fcp_port *pptr;
7577 struct fcp_tgt *ptgt;
7578 struct fcp_lun *plun;
7579 struct fcp_reportlun_resp *report_lun;
7581 pptr = icmd->ipkt_port;
7582 ptgt = icmd->ipkt_tgt;
7583 len = fpkt->pkt_datalen;
	/* Reject an undersized response or a failed NOSLEEP allocation. */
7585 if ((len < FCP_LUN_HEADER) ||
7586 ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7587 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7588 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7589 fcp_icmd_free(pptr, icmd);
7590 return;
	/* Copy the raw REPORT LUNS payload out of the packet buffer. */
7593 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7594 fpkt->pkt_datalen);
7596 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7597 fcp_trace, FCP_BUF_LEVEL_5, 0,
7598 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7599 pptr->port_instance, ptgt->tgt_d_id,);
7602 * Get the number of luns (which is supplied as LUNS * 8) the
7603 * device claims it has.
	/* num_lun is a big-endian byte count of 8-byte LUN entries. */
7605 nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7608 * Get the maximum number of luns the buffer submitted can hold.
7610 nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7613 * Due to limitations of certain hardware, we support only 16 bit LUNs
7615 if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7616 kmem_free(report_lun, len);
7618 fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7619 " 0x%x number of LUNs for target=%x", nluns_claimed,
7620 ptgt->tgt_d_id);
7622 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7623 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7624 fcp_icmd_free(pptr, icmd);
7625 return;
7629 * If there are more LUNs than we have allocated memory for,
7630 * allocate more space and send down yet another report lun if
7631 * the maximum number of attempts hasn't been reached.
7633 mutex_enter(&ptgt->tgt_mutex);
7635 if ((nluns_claimed > nluns_bufmax) &&
7636 (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7638 struct fcp_lun *plun;
7640 ptgt->tgt_report_lun_cnt++;
7641 plun = ptgt->tgt_lun;
7642 ASSERT(plun != NULL);
7643 mutex_exit(&ptgt->tgt_mutex);
7645 kmem_free(report_lun, len);
7647 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7648 fcp_trace, FCP_BUF_LEVEL_5, 0,
7649 "!Dynamically discovered %d LUNs for D_ID=%x",
7650 nluns_claimed, ptgt->tgt_d_id);
7652 /* Retrieve the rscn count (if a valid one exists) */
7653 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7654 rscn_count = ((fc_ulp_rscn_info_t *)
7655 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7656 ulp_rscn_count;
7657 } else {
7658 rscn_count = FC_INVALID_RSCN_COUNT;
	/* Reissue REPORT LUNS with a buffer sized for the claimed count. */
7661 if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7662 FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7663 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7664 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7665 (void) fcp_call_finish_init(pptr, ptgt,
7666 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7667 icmd->ipkt_cause);
7670 fcp_icmd_free(pptr, icmd);
7671 return;
	/* Retry budget exhausted: clamp to what the buffer could hold. */
7674 if (nluns_claimed > nluns_bufmax) {
7675 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7676 fcp_trace, FCP_BUF_LEVEL_5, 0,
7677 "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7678 " Number of LUNs lost=%x",
7679 ptgt->tgt_port_wwn.raw_wwn[0],
7680 ptgt->tgt_port_wwn.raw_wwn[1],
7681 ptgt->tgt_port_wwn.raw_wwn[2],
7682 ptgt->tgt_port_wwn.raw_wwn[3],
7683 ptgt->tgt_port_wwn.raw_wwn[4],
7684 ptgt->tgt_port_wwn.raw_wwn[5],
7685 ptgt->tgt_port_wwn.raw_wwn[6],
7686 ptgt->tgt_port_wwn.raw_wwn[7],
7687 nluns_claimed - nluns_bufmax);
7689 nluns_claimed = nluns_bufmax;
7691 ptgt->tgt_lun_cnt = nluns_claimed;
7694 * Identify missing LUNs and print warning messages
7696 for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7697 int offline;
7698 int exists = 0;
7700 offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
	/* Scan the response for a LUN entry matching this known LUN. */
7702 for (i = 0; i < nluns_claimed && exists == 0; i++) {
7703 uchar_t *lun_string;
7705 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
	/* Top two bits of byte 0 encode the LUN addressing method. */
7707 switch (lun_string[0] & 0xC0) {
7708 case FCP_LUN_ADDRESSING:
7709 case FCP_PD_ADDRESSING:
7710 case FCP_VOLUME_ADDRESSING:
	/* 14-bit LUN number: low 6 bits of byte 0 plus byte 1. */
7711 lun_num = ((lun_string[0] & 0x3F) << 8) |
7712 lun_string[1];
7713 if (plun->lun_num == lun_num) {
7714 exists++;
7715 break;
7717 break;
7719 default:
7720 break;
	/* Known LUN vanished from the response: mark it as disappeared. */
7724 if (!exists && !offline) {
	/* Re-take locks in port-then-target order before checking state. */
7725 mutex_exit(&ptgt->tgt_mutex);
7727 mutex_enter(&pptr->port_mutex);
7728 mutex_enter(&ptgt->tgt_mutex);
7729 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7731 * set disappear flag when device was connected
7733 if (!(plun->lun_state &
7734 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7735 plun->lun_state |= FCP_LUN_DISAPPEARED;
7737 mutex_exit(&ptgt->tgt_mutex);
7738 mutex_exit(&pptr->port_mutex);
7739 if (!(plun->lun_state &
7740 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7741 fcp_log(CE_NOTE, pptr->port_dip,
7742 "!Lun=%x for target=%x disappeared",
7743 plun->lun_num, ptgt->tgt_d_id);
7745 mutex_enter(&ptgt->tgt_mutex);
7746 } else {
	/* Link/target state changed while scanning: abandon this pass. */
7747 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7748 fcp_trace, FCP_BUF_LEVEL_5, 0,
7749 "fcp_handle_reportlun,1: state change"
7750 " occured for D_ID=0x%x", ptgt->tgt_d_id);
7751 mutex_exit(&ptgt->tgt_mutex);
7752 mutex_exit(&pptr->port_mutex);
7753 kmem_free(report_lun, len);
7754 (void) fcp_call_finish_init(pptr, ptgt,
7755 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7756 icmd->ipkt_cause);
7757 fcp_icmd_free(pptr, icmd);
7758 return;
7760 } else if (exists) {
7762 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7763 * actually exists in REPORT_LUN response
7765 if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7766 plun->lun_state &=
7767 ~FCP_LUN_DEVICE_NOT_CONNECTED;
7769 if (offline || plun->lun_num == 0) {
7770 if (plun->lun_state & FCP_LUN_DISAPPEARED) {
7771 plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7772 mutex_exit(&ptgt->tgt_mutex);
7773 fcp_log(CE_NOTE, pptr->port_dip,
7774 "!Lun=%x for target=%x reappeared",
7775 plun->lun_num, ptgt->tgt_d_id);
7776 mutex_enter(&ptgt->tgt_mutex);
	/* tmp_cnt counts LUNs still being discovered; never let it be 0. */
7782 ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7783 mutex_exit(&ptgt->tgt_mutex);
7785 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7786 fcp_trace, FCP_BUF_LEVEL_5, 0,
7787 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7788 pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7790 /* scan each lun */
7791 for (i = 0; i < nluns_claimed; i++) {
7792 uchar_t *lun_string;
7794 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7796 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7797 fcp_trace, FCP_BUF_LEVEL_5, 0,
7798 "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7799 " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7800 lun_string[0]);
7802 switch (lun_string[0] & 0xC0) {
7803 case FCP_LUN_ADDRESSING:
7804 case FCP_PD_ADDRESSING:
7805 case FCP_VOLUME_ADDRESSING:
7806 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7808 /* We will skip masked LUNs because of the blacklist. */
7809 if (fcp_lun_blacklist != NULL) {
7810 mutex_enter(&ptgt->tgt_mutex);
7811 if (fcp_should_mask(&ptgt->tgt_port_wwn,
7812 lun_num) == TRUE) {
7813 ptgt->tgt_lun_cnt--;
7814 mutex_exit(&ptgt->tgt_mutex);
7815 break;
7817 mutex_exit(&ptgt->tgt_mutex);
7820 /* see if this LUN is already allocated */
7821 if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7822 plun = fcp_alloc_lun(ptgt);
7823 if (plun == NULL) {
7824 fcp_log(CE_NOTE, pptr->port_dip,
7825 "!Lun allocation failed"
7826 " target=%x lun=%x",
7827 ptgt->tgt_d_id, lun_num);
7828 break;
7832 mutex_enter(&plun->lun_tgt->tgt_mutex);
7833 /* convert to LUN */
7834 plun->lun_addr.ent_addr_0 =
7835 BE_16(*(uint16_t *)&(lun_string[0]));
7836 plun->lun_addr.ent_addr_1 =
7837 BE_16(*(uint16_t *)&(lun_string[2]));
7838 plun->lun_addr.ent_addr_2 =
7839 BE_16(*(uint16_t *)&(lun_string[4]));
7840 plun->lun_addr.ent_addr_3 =
7841 BE_16(*(uint16_t *)&(lun_string[6]));
	/* Mark the LUN busy until its INQUIRY completes. */
7843 plun->lun_num = lun_num;
7844 plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7845 plun->lun_state &= ~FCP_LUN_OFFLINE;
7846 mutex_exit(&plun->lun_tgt->tgt_mutex);
7848 /* Retrieve the rscn count (if a valid one exists) */
7849 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7850 rscn_count = ((fc_ulp_rscn_info_t *)
7851 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7852 ulp_rscn_count;
7853 } else {
7854 rscn_count = FC_INVALID_RSCN_COUNT;
	/* Kick off standard INQUIRY for this LUN (next discovery step). */
7857 if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7858 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7859 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7860 mutex_enter(&pptr->port_mutex);
7861 mutex_enter(&plun->lun_tgt->tgt_mutex);
7862 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7863 fcp_log(CE_NOTE, pptr->port_dip,
7864 "!failed to send INQUIRY"
7865 " target=%x lun=%x",
7866 ptgt->tgt_d_id, plun->lun_num);
7867 } else {
7868 FCP_TRACE(fcp_logq,
7869 pptr->port_instbuf, fcp_trace,
7870 FCP_BUF_LEVEL_5, 0,
7871 "fcp_handle_reportlun,2: state"
7872 " change occured for D_ID=0x%x",
7873 ptgt->tgt_d_id);
7875 mutex_exit(&plun->lun_tgt->tgt_mutex);
7876 mutex_exit(&pptr->port_mutex);
7877 } else {
	/* INQUIRY sent: its callback will decrement tmp_cnt, not us. */
7878 continue;
7880 break;
7882 default:
7883 fcp_log(CE_WARN, NULL,
7884 "!Unsupported LUN Addressing method %x "
7885 "in response to REPORT_LUN", lun_string[0]);
7886 break;
7890 * each time through this loop we should decrement
7891 * the tmp_cnt by one -- since we go through this loop
7892 * one time for each LUN, the tmp_cnt should never be <=0
7894 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7895 icmd->ipkt_change_cnt, icmd->ipkt_cause);
	/* Zero LUNs reported: release the single count reserved above. */
7898 if (i == 0) {
7899 fcp_log(CE_WARN, pptr->port_dip,
7900 "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7901 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7902 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7905 kmem_free(report_lun, len);
7906 fcp_icmd_free(pptr, icmd);
7911 * called internally to return a LUN given a target and a LUN number
7913 static struct fcp_lun *
7914 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7916 struct fcp_lun *plun;
7918 mutex_enter(&ptgt->tgt_mutex);
7919 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7920 if (plun->lun_num == lun_num) {
7921 mutex_exit(&ptgt->tgt_mutex);
7922 return (plun);
7925 mutex_exit(&ptgt->tgt_mutex);
7927 return (NULL);
7932  * handle finishing one target for fcp_finish_init
7934  * return true (non-zero) if we want finish_init to continue with the
7935  * next target
7937  * called with the port mutex held
7939 /*ARGSUSED*/
7940 static int
7941 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7942     int link_cnt, int tgt_cnt, int cause)
7944 int rval = 1;
7945 ASSERT(pptr != NULL);
7946 ASSERT(ptgt != NULL);
7948 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7949 fcp_trace, FCP_BUF_LEVEL_5, 0,
7950 "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7951 ptgt->tgt_state);
7953 ASSERT(mutex_owned(&pptr->port_mutex));
	/*
	 * Generation-count check: a mismatch means a link reset or target
	 * change raced with discovery, so this pass must be abandoned.
	 */
7955 if ((pptr->port_link_cnt != link_cnt) ||
7956 (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7958 * oh oh -- another link reset or target change
7959 * must have occurred while we are in here
7961 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7963 return (0);
7964 } else {
7965 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7968 mutex_enter(&ptgt->tgt_mutex);
7970 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7972 * tgt is not offline -- is it marked (i.e. needs
7973 * to be offlined) ??
7975 if (ptgt->tgt_state & FCP_TGT_MARK) {
7977 * this target not offline *and*
7978 * marked
7980 ptgt->tgt_state &= ~FCP_TGT_MARK;
	/* rval from offline_target decides whether finish_init continues */
7981 rval = fcp_offline_target(pptr, ptgt, link_cnt,
7982 tgt_cnt, 0, 0);
7983 } else {
7984 ptgt->tgt_state &= ~FCP_TGT_BUSY;
7986 /* create the LUNs */
7987 if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7988 ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7989 fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7990 cause);
7991 ptgt->tgt_device_created = 1;
7992 } else {
	/* On-demand nodes: just clear LUN busy state, no device creation. */
7993 fcp_update_tgt_state(ptgt, FCP_RESET,
7994 FCP_LUN_BUSY);
7999 mutex_exit(&ptgt->tgt_mutex);
8001 return (rval);
8006 * this routine is called to finish port initialization
8008 * Each port has a "temp" counter -- when a state change happens (e.g.
8009 * port online), the temp count is set to the number of devices in the map.
8010 * Then, as each device gets "discovered", the temp counter is decremented
8011 * by one. When this count reaches zero we know that all of the devices
8012 * in the map have been discovered (or an error has occurred), so we can
8013 * then finish initialization -- which is done by this routine (well, this
8014  * and fcp_finish_tgt())
8016 * acquires and releases the global mutex
8018 * called with the port mutex owned
8020 static void
8021 fcp_finish_init(struct fcp_port *pptr)
8023 #ifdef DEBUG
8024 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8025 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8026 FCP_STACK_DEPTH);
8027 #endif /* DEBUG */
8029 ASSERT(mutex_owned(&pptr->port_mutex));
8031 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8032 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8033 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8035 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8036 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8037 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8038 pptr->port_state &= ~FCP_STATE_ONLINING;
8039 pptr->port_state |= FCP_STATE_ONLINE;
8042 /* Wake up threads waiting on config done */
8043 cv_broadcast(&pptr->port_config_cv);
8048  * called from fcp_finish_init to create the LUNs for a target
8050  * called with the port mutex owned
	/*
	 * NOTE(review): the comment above says "port mutex owned" but the
	 * ASSERT below checks tgt_mutex; the caller (fcp_finish_tgt) appears
	 * to hold both — confirm and update the comment.
	 */
8052 static void
8053 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8055 struct fcp_lun *plun;
8056 struct fcp_port *pptr;
8057 child_info_t *cip = NULL;
8059 ASSERT(ptgt != NULL);
8060 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8062 pptr = ptgt->tgt_port;
8064 ASSERT(pptr != NULL);
8066 /* scan all LUNs for this target */
8067 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8068 if (plun->lun_state & FCP_LUN_OFFLINE) {
8069 continue;
	/* LUN still marked after discovery: it vanished, so offline it. */
8072 if (plun->lun_state & FCP_LUN_MARK) {
8073 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8074 fcp_trace, FCP_BUF_LEVEL_2, 0,
8075 "fcp_create_luns: offlining marked LUN!");
8076 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8077 continue;
8080 plun->lun_state &= ~FCP_LUN_BUSY;
8083 * There are conditions in which FCP_LUN_INIT flag is cleared
8084 * but we have a valid plun->lun_cip. To cover this case also
8085 * CLEAR_BUSY whenever we have a valid lun_cip.
8087 if (plun->lun_mpxio && plun->lun_cip &&
8088 (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8089 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8090 0, 0))) {
8091 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8092 fcp_trace, FCP_BUF_LEVEL_2, 0,
8093 "fcp_create_luns: enable lun %p failed!",
8094 plun);
	/* Already initialized and unchanged: nothing to (re)create. */
8097 if (plun->lun_state & FCP_LUN_INIT &&
8098 !(plun->lun_state & FCP_LUN_CHANGED)) {
8099 continue;
8102 if (cause == FCP_CAUSE_USER_CREATE) {
8103 continue;
8106 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8107 fcp_trace, FCP_BUF_LEVEL_6, 0,
8108 "create_luns: passing ONLINE elem to HP thread");
8111 * If lun has changed, prepare for offlining the old path.
8112 * Do not offline the old path right now, since it may be
8113 * still opened.
8115 if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8116 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8119 /* pass an ONLINE element to the hotplug thread */
8120 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8121 link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8124 * We can not synchronous attach (i.e pass
8125 * NDI_ONLINE_ATTACH) here as we might be
8126 * coming from an interrupt or callback
8127 * thread.
8129 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8130 link_cnt, tgt_cnt, 0, 0)) {
8131 fcp_log(CE_CONT, pptr->port_dip,
8132 "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8133 plun->lun_tgt->tgt_d_id, plun->lun_num);
8141 * function to online/offline devices
8143 static int
8144 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8145 int online, int lcount, int tcount, int flags)
8147 int rval = NDI_FAILURE;
8148 int circ;
8149 child_info_t *ccip;
8150 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8151 int is_mpxio = pptr->port_mpxio;
8152 dev_info_t *cdip, *pdip;
8153 char *devname;
8155 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8157 * When this event gets serviced, lun_cip and lun_mpxio
8158 * has changed, so it should be invalidated now.
8160 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8161 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8162 "plun: %p, cip: %p, what:%d", plun, cip, online);
8163 return (rval);
8166 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8167 fcp_trace, FCP_BUF_LEVEL_2, 0,
8168 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8169 "flags=%x mpxio=%x\n",
8170 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8171 plun->lun_mpxio);
8174 * lun_mpxio needs checking here because we can end up in a race
8175 * condition where this task has been dispatched while lun_mpxio is
8176 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8177 * enable MPXIO for the LUN, but was unable to, and hence cleared
8178 * the flag. We rely on the serialization of the tasks here. We return
8179 * NDI_SUCCESS so any callers continue without reporting spurious
8180 * errors, and the still think we're an MPXIO LUN.
8183 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8184 online == FCP_MPXIO_PATH_SET_BUSY) {
8185 if (plun->lun_mpxio) {
8186 rval = fcp_update_mpxio_path(plun, cip, online);
8187 } else {
8188 rval = NDI_SUCCESS;
8190 return (rval);
8194 * Explicit devfs_clean() due to ndi_devi_offline() not
8195 * executing devfs_clean() if parent lock is held.
8197 ASSERT(!servicing_interrupt());
8198 if (online == FCP_OFFLINE) {
8199 if (plun->lun_mpxio == 0) {
8200 if (plun->lun_cip == cip) {
8201 cdip = DIP(plun->lun_cip);
8202 } else {
8203 cdip = DIP(cip);
8205 } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8206 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8207 } else if ((plun->lun_cip != cip) && cip) {
8209 * This means a DTYPE/GUID change, we shall get the
8210 * dip of the old cip instead of the current lun_cip.
8212 cdip = mdi_pi_get_client(PIP(cip));
8214 if (cdip) {
8215 if (i_ddi_devi_attached(cdip)) {
8216 pdip = ddi_get_parent(cdip);
8217 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8218 ndi_devi_enter(pdip, &circ);
8219 (void) ddi_deviname(cdip, devname);
8221 * Release parent lock before calling
8222 * devfs_clean().
8224 ndi_devi_exit(pdip, circ);
8225 (void) devfs_clean(pdip, devname + 1,
8226 DV_CLEAN_FORCE);
8227 kmem_free(devname, MAXNAMELEN + 1);
8232 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8233 return (NDI_FAILURE);
8236 if (is_mpxio) {
8237 mdi_devi_enter(pptr->port_dip, &circ);
8238 } else {
8239 ndi_devi_enter(pptr->port_dip, &circ);
8242 mutex_enter(&pptr->port_mutex);
8243 mutex_enter(&plun->lun_mutex);
8245 if (online == FCP_ONLINE) {
8246 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8247 if (ccip == NULL) {
8248 goto fail;
8250 } else {
8251 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8252 goto fail;
8254 ccip = cip;
8257 if (online == FCP_ONLINE) {
8258 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8259 &circ);
8260 fc_ulp_log_device_event(pptr->port_fp_handle,
8261 FC_ULP_DEVICE_ONLINE);
8262 } else {
8263 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8264 &circ);
8265 fc_ulp_log_device_event(pptr->port_fp_handle,
8266 FC_ULP_DEVICE_OFFLINE);
8269 fail: mutex_exit(&plun->lun_mutex);
8270 mutex_exit(&pptr->port_mutex);
8272 if (is_mpxio) {
8273 mdi_devi_exit(pptr->port_dip, circ);
8274 } else {
8275 ndi_devi_exit(pptr->port_dip, circ);
8278 fc_ulp_idle_port(pptr->port_fp_handle);
8280 return (rval);
8285 * take a target offline by taking all of its LUNs offline
/*
 * Take a target offline, either immediately (nowait) or by queueing a
 * delayed-offline element that the watchdog will service later.
 *
 * Returns 1 if the offline was performed or queued, 0 if the link/target
 * change counters show the request is stale.
 *
 * Called and returns with both port_mutex and tgt_mutex held (tgt_mutex
 * is dropped briefly around the trace calls).
 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/* Stale request: the link or the target changed since it was made. */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	/* 0 means "current": snapshot the change count for the element. */
	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports the tape capability and an element can be
	 * allocated (KM_NOSLEEP: locks are held), queue a delayed offline
	 * for fcp_scan_offline_tgts(); otherwise offline right away.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
/*
 * Immediately mark a target offline and offline all of its LUNs.
 * Re-enables fp relogin for the remote port so it can be rediscovered.
 *
 * Called with both port_mutex and tgt_mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;	/* replaces all state bits */
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
/*
 * Walk a target's LUN list and offline (immediately, nowait=1) every LUN
 * that is not already offline.
 *
 * Called with the target's port_mutex and tgt_mutex held.
 */
static void
fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
    int flags)
{
	struct fcp_lun	*plun;

	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
		}
	}
}
8367 * take a LUN offline
8369 * enters and leaves with the target mutex held, releasing it in the process
8371 * allocates memory in non-sleep mode
/*
 * Take a LUN offline: immediately when nowait is set, otherwise by
 * queueing a delayed-offline element serviced by fcp_scan_offline_luns().
 * If the element cannot be allocated (KM_NOSLEEP), falls back to an
 * immediate offline.
 *
 * Called with the LUN's tgt_mutex held (dropped and reacquired further
 * down the immediate-offline path).
 */
static void
fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int nowait, int flags)
{
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_lun_elem	*elem;

	ASSERT(plun != NULL);
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	if (nowait) {
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
		return;
	}

	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		/*
		 * nowait is necessarily 0 here (early return above), so
		 * the delay is always applied; the check mirrors the
		 * equivalent code in fcp_offline_target().
		 */
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->plun = plun;
		elem->link_cnt = link_cnt;
		/* Snapshot the current change count, not the caller's. */
		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
		elem->next = pptr->port_offline_luns;
		pptr->port_offline_luns = elem;
	} else {
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
	}
}
/*
 * Prepare a LUN for offlining: abort all queued (not yet issued) commands
 * for it and, for an MPxIO LUN, clear the path-busy state and disable the
 * path so MPxIO stops routing I/O to it.
 *
 * Called with tgt_mutex held; the mutex is dropped and reacquired around
 * the command scan/abort and around mdi_pi_disable_path(), both of which
 * must not run under it.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
/*
 * Offline a LUN immediately: update its state flags, abort its queued
 * commands, and hand an FCP_OFFLINE element to the hotplug thread to
 * detach the child node.
 *
 * Called with tgt_mutex held (dropped briefly around
 * fcp_update_offline_flags(), which takes its own locks).
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* Only LUNs with a child node need the hotplug thread's help. */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
/*
 * Watchdog helper: walk the port's delayed-offline LUN list and, for every
 * element whose deadline has passed, offline the LUN now (if the link and
 * target change counters still match) and unlink/free the element.
 *
 * Called with port_mutex held.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt *ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): FCP_TGT_OFFLINE is tested against a
			 * LUN state word; FCP_LUN_OFFLINE looks like the
			 * intended flag.  This is harmless only if the two
			 * flags share a value -- confirm against fcpvar.h.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the serviced element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
/*
 * Watchdog helper: walk the port's delayed-offline target list and, for
 * every element whose deadline has passed, offline the target now if the
 * element is still current, then unlink and free the element.
 *
 * Called with port_mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int		outdated = 1;
			struct fcp_tgt	*ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the serviced element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
/*
 * Mark a LUN offline in its state flags and, if it has an initialized
 * child node, fire the FCAL_REMOVE_EVENT NDI event on that node so layered
 * consumers learn of the removal.
 *
 * Called with no LUN/target locks held; takes tgt_mutex then lun_mutex.
 * Note the two exit paths release the mutexes in different orders
 * depending on which branch is taken.
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Resolve the dev_info node: direct for non-MPxIO,
		 * via the path-info client for MPxIO. */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8615 * Scan all of the command pkts for this port, moving pkts that
8616 * match our LUN onto our own list (headed by "head")
/*
 * Scan all of the command pkts for this port, moving pkts that
 * match our LUN onto our own list (headed by "head").
 *
 * Only commands still queued (not FCP_PKT_ISSUED and not FLAG_NOINTR)
 * are moved; issued commands will complete through the normal callback
 * path.  Returns the head of the removed list (NULL if none).
 *
 * Takes and releases port_pkt_mutex internally.
 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int		cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* Unlink cmd from the port queue. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}
		/* Fix up the tail pointer if we removed the last element. */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append cmd to our private list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8685 * Abort all the commands in the command queue
/*
 * Abort all the commands in the command queue
 *
 * Each packet is completed with CMD_DEV_GONE so the target driver does
 * not requeue or retry it.  port_mutex is dropped around each completion
 * callback since the callback may re-enter the driver.
 *
 * Called with port_mutex held; returns with it held.
 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalidate them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8729 * the pkt_comp callback for command packets
/*
 * the pkt_comp callback for command packets
 *
 * Translates the fc_packet_t completion into SCSI pkt status via
 * fcp_complete_pkt() and invokes the target driver's completion routine.
 * Packets being reclaimed by the watch thread (FCP_PKT_ABORTING) are
 * ignored here.
 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/* Double completion is fatal even in non-DEBUG kernels. */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	fcp_post_callback(cmd);
}
/*
 * Translate an FCP/FC transport completion (fc_packet_t) into SCSA
 * scsi_pkt status for the target driver.
 *
 * On FC_PKT_SUCCESS: copies in the FCP response, sets pkt_state/pkt_resid,
 * pulls in sense data (ARQ) when present, and kicks off LUN rediscovery
 * when the sense indicates REPORT LUNS changed / LU not supported.
 * On any other FC state: maps the FC state/reason pair onto pkt_reason
 * and pkt_statistics values the target drivers understand.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef	DEBUG
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;
		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean completion, nothing more to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t	*cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t	rqlen;
			caddr_t	sense_from;
			child_info_t	*cip;
			timeout_id_t	tid;
			struct scsi_arq_status	*arq;
			struct scsi_extended_sense	*sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense bytes follow the rsp header + rsp info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t	*cdip = NULL;
			caddr_t		ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */
			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t	*cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9232 static int
9233 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9235 if (rsp->reserved_0 || rsp->reserved_1 ||
9236 rsp->fcp_u.fcp_status.reserved_0 ||
9237 rsp->fcp_u.fcp_status.reserved_1) {
9239 * These reserved fields should ideally be zero. FCP-2 does say
9240 * that the recipient need not check for reserved fields to be
9241 * zero. If they are not zero, we will not make a fuss about it
9242 * - just log it (in debug to both trace buffer and messages
9243 * file and to trace buffer only in non-debug) and move on.
9245 * Non-zero reserved fields were seen with minnows.
9247 * qlc takes care of some of this but we cannot assume that all
9248 * FCAs will do so.
9250 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9251 FCP_BUF_LEVEL_5, 0,
9252 "Got fcp response packet with non-zero reserved fields "
9253 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9254 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9255 rsp->reserved_0, rsp->reserved_1,
9256 rsp->fcp_u.fcp_status.reserved_0,
9257 rsp->fcp_u.fcp_status.reserved_1);
9260 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9261 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9262 return (FC_FAILURE);
9265 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9266 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9267 sizeof (struct fcp_rsp))) {
9268 return (FC_FAILURE);
9271 return (FC_SUCCESS);
9276 * This is called when there is a change the in device state. The case we're
9277 * handling here is, if the d_id s does not match, offline this tgt and online
9278 * a new tgt with the new d_id. called from fcp_handle_devices with
9279 * port_mutex held.
/*
 * Handle a change in a known target's addressing: if its D_ID (or, on
 * external/loop topologies, its hard address) no longer matches the new
 * port map entry, offline the old identity first, then let
 * fcp_handle_mapflags() bring the target up under the new identity.
 *
 * Called from fcp_handle_devices() with port_mutex held; returns that
 * function's result.
 */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	/*
	 * The two cases where the port_device_changed is called is
	 * either it changes it's d_id or it's hard address.
	 */
	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {

		/* offline this target */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}
9319 * Function: fcp_alloc_lun
9321 * Description: Creates a new lun structure and adds it to the list
9322 * of luns of the target.
9324 * Argument: ptgt Target the lun will belong to.
9326 * Return Value: NULL Failed
9327 * Not NULL Succeeded
9329 * Context: Kernel context
9331 static struct fcp_lun *
9332 fcp_alloc_lun(struct fcp_tgt *ptgt)
9334 struct fcp_lun *plun;
9336 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9337 if (plun != NULL) {
9339 * Initialize the mutex before putting in the target list
9340 * especially before releasing the target mutex.
9342 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9343 plun->lun_tgt = ptgt;
9345 mutex_enter(&ptgt->tgt_mutex);
9346 plun->lun_next = ptgt->tgt_lun;
9347 ptgt->tgt_lun = plun;
9348 plun->lun_old_guid = NULL;
9349 plun->lun_old_guid_size = 0;
9350 mutex_exit(&ptgt->tgt_mutex);
9353 return (plun);
9357 * Function: fcp_dealloc_lun
9359 * Description: Frees the LUN structure passed by the caller.
9361 * Argument: plun LUN structure to free.
9363 * Return Value: None
9365 * Context: Kernel context.
/*
 * Function: fcp_dealloc_lun
 *
 * Description: Frees the LUN structure passed by the caller, removing its
 *		child node (if any), destroying its mutex and releasing any
 *		GUID buffers it owns.  The caller is responsible for having
 *		unlinked the LUN from its target's list.
 *
 * Argument:	plun	LUN structure to free.
 *
 * Return Value: None
 *
 * Context: Kernel context.
 */
static void
fcp_dealloc_lun(struct fcp_lun *plun)
{
	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip) {
		fcp_remove_child(plun);
	}
	mutex_exit(&plun->lun_mutex);

	mutex_destroy(&plun->lun_mutex);
	/* kmem_free() needs the exact size, so NULL guards are required. */
	if (plun->lun_guid) {
		kmem_free(plun->lun_guid, plun->lun_guid_size);
	}
	if (plun->lun_old_guid) {
		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
	}
	kmem_free(plun, sizeof (*plun));
}
/*
 * Function: fcp_alloc_tgt
 *
 * Description: Creates a new target structure and adds it to the port
 *		hash list.  If the link state changed (link_cnt no longer
 *		matches the port's counter) while we were allocating, the
 *		allocation is undone and NULL is returned.
 *
 * Argument:	pptr		fcp port structure
 *		*map_entry	entry describing the target to create
 *		link_cnt	Link state change counter
 *
 * Return Value: NULL		Failed
 *		 Not NULL	Succeeded
 *
 * Context:	Kernel context.
 */
static struct fcp_tgt *
fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
{
	int hash;
	uchar_t *wwn;
	struct fcp_tgt *ptgt;

	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
	if (ptgt != NULL) {
		mutex_enter(&pptr->port_mutex);
		if (link_cnt != pptr->port_link_cnt) {
			/*
			 * oh oh -- another link reset
			 * in progress -- give up
			 */
			mutex_exit(&pptr->port_mutex);
			kmem_free(ptgt, sizeof (*ptgt));
			ptgt = NULL;
		} else {
			/*
			 * initialize the mutex before putting in the port
			 * wwn list, especially before releasing the port
			 * mutex.
			 */
			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);

			/* add new target entry to the port's hash list */
			wwn = (uchar_t *)&map_entry->map_pwwn;
			hash = FCP_HASH(wwn);

			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
			pptr->port_tgt_hash_table[hash] = ptgt;

			/* save cross-ptr */
			ptgt->tgt_port = pptr;

			ptgt->tgt_change_cnt = 1;

			/* initialize the target manual_config_only flag */
			if (fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 0;
			} else {
				ptgt->tgt_manual_config_only = 1;
			}
			mutex_exit(&pptr->port_mutex);
		}
	}

	return (ptgt);
}
/*
 * Function: fcp_dealloc_tgt
 *
 * Description: Frees the target structure passed by the caller.
 *		NOTE(review): presumably the caller has already removed
 *		the target from the port hash list and released its
 *		luns -- confirm against callers.
 *
 * Argument:	ptgt		Target structure to free.
 *
 * Return Value: None
 *
 * Context:	Kernel context.
 */
static void
fcp_dealloc_tgt(struct fcp_tgt *ptgt)
{
	mutex_destroy(&ptgt->tgt_mutex);
	kmem_free(ptgt, sizeof (*ptgt));
}
/*
 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
 *
 * Device discovery commands will not be retried for-ever as
 * this will have repercussions on other devices that need to
 * be submitted to the hotplug thread. After a quick glance
 * at the SCSI-3 spec, it was found that the spec doesn't
 * mandate a forever retry, rather recommends a delayed retry.
 *
 * Since Photon IB is single threaded, STATUS_BUSY is common
 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	/* port mutex first, then target mutex -- keep this lock order */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/* the link/target state moved on; drop the command */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/* back off a little more on every successive retry */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}
/*
 * Function: fcp_transport
 *
 * Description: This function submits the Fibre Channel packet to the
 *		transport layer by calling fc_ulp_transport(). If
 *		fc_ulp_transport() fails the submission, the treatment
 *		depends on the value of the variable internal.
 *
 * Argument:	port_handle	fp/fctl port handle.
 *		*fpkt		Packet to submit to the transport layer.
 *		internal	Not zero when it's an internal packet.
 *
 * Return Value: FC_TRAN_BUSY
 *		 FC_STATEC_BUSY
 *		 FC_OFFLINE
 *		 FC_LOGINREQ
 *		 FC_DEVICE_BUSY
 *		 FC_SUCCESS
 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC)pkt_timeout is decremented
		 * by the QUEUE Delay added by our driver, Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout)
		 * If the time expires right inside driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed-out.
		 */
		if (internal) {
			char *op;
			struct fcp_ipkt *icmd;

			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
			/* pick a human-readable name for the log message */
			switch (icmd->ipkt_opcode) {
			case SCMD_REPORT_LUN:
				op = "REPORT LUN";
				break;

			case SCMD_INQUIRY:
				op = "INQUIRY";
				break;

			case SCMD_INQUIRY_PAGE83:
				op = "INQUIRY-83";
				break;

			default:
				op = "Internal SCSI COMMAND";
				break;
			}

			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
				rval = FC_SUCCESS;
			}
		} else {
			struct fcp_pkt *cmd;
			struct fcp_port *pptr;

			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
			cmd->cmd_state = FCP_PKT_IDLE;
			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
				/* caller forbids queueing; report busy */
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_transport: xport busy for pkt %p",
				    cmd->cmd_pkt);
				rval = FC_TRAN_BUSY;
			} else {
				fcp_queue_pkt(pptr, cmd);
				rval = FC_SUCCESS;
			}
		}
	}

	return (rval);
}
9642 /*VARARGS3*/
9643 static void
9644 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9646 char buf[256];
9647 va_list ap;
9649 if (dip == NULL) {
9650 dip = fcp_global_dip;
9653 va_start(ap, fmt);
9654 (void) vsprintf(buf, fmt, ap);
9655 va_end(ap);
9657 scsi_log(dip, "fcp", level, buf);
9661 * This function retries NS registry of FC4 type.
9662 * It assumes that fcp_mutex is held.
9663 * The function does nothing if topology is not fabric
9664 * So, the topology has to be set before this function can be called
9666 static void
9667 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9669 int rval;
9671 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9673 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9674 ((pptr->port_topology != FC_TOP_FABRIC) &&
9675 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9676 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9677 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9679 return;
9681 mutex_exit(&pptr->port_mutex);
9682 rval = fcp_do_ns_registry(pptr, s_id);
9683 mutex_enter(&pptr->port_mutex);
9685 if (rval == 0) {
9686 /* Registry successful. Reset flag */
9687 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9692 * This function registers the ULP with the switch by calling transport i/f
9694 static int
9695 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9697 fc_ns_cmd_t ns_cmd;
9698 ns_rfc_type_t rfc;
9699 uint32_t types[8];
9702 * Prepare the Name server structure to
9703 * register with the transport in case of
9704 * Fabric configuration.
9706 bzero(&rfc, sizeof (rfc));
9707 bzero(types, sizeof (types));
9709 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9710 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9712 rfc.rfc_port_id.port_id = s_id;
9713 bcopy(types, rfc.rfc_types, sizeof (types));
9715 ns_cmd.ns_flags = 0;
9716 ns_cmd.ns_cmd = NS_RFT_ID;
9717 ns_cmd.ns_req_len = sizeof (rfc);
9718 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9719 ns_cmd.ns_resp_len = 0;
9720 ns_cmd.ns_resp_payload = NULL;
9723 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9725 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9726 fcp_log(CE_WARN, pptr->port_dip,
9727 "!ns_registry: failed name server registration");
9728 return (1);
9731 return (0);
/*
 * Function: fcp_handle_port_attach
 *
 * Description: This function is called from fcp_port_attach() to attach a
 *		new port. This routine does the following:
 *
 *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the
 *		   name server.
 *		3) Kicks off the enumeration of the targets/luns visible
 *		   through this new port.  That is done by calling
 *		   fcp_statec_callback() if the port is online.
 *
 * Argument:	ulph		fp/fctl port handle.
 *		*pinfo		Port information.
 *		s_id		Port ID.
 *		instance	Device instance number for the local port
 *				(returned by ddi_get_instance()).
 *
 * Return Value: DDI_SUCCESS
 *		 DDI_FAILURE
 *
 * Context:	User and Kernel context.
 */
/*ARGSUSED*/
static int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, int instance)
{
	int			res = DDI_FAILURE;
	scsi_hba_tran_t		*tran;
	int			mutex_initted = FALSE;
	int			hba_attached = FALSE;
	int			soft_state_linked = FALSE;
	int			event_bind = FALSE;
	struct fcp_port		*pptr;
	fc_portmap_t		*tmp_list = NULL;
	uint32_t		max_cnt, alloc_cnt;
	uchar_t			*boot_wwn = NULL;
	uint_t			nbytes;
	int			manual_cfg;

	/*
	 * this port instance attaching for the first time (or after
	 * being detached before)
	 */
	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);

	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
		    instance);
		return (res);
	}

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		/* this shouldn't happen */
		ddi_soft_state_free(fcp_softstate, instance);
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    MANUAL_CFG_ONLY,
	    -1)) != -1) {
		if (manual_cfg == 1) {
			char	*pathname;
			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(pptr->port_dip, pathname);
			cmn_err(CE_NOTE,
			    "%s (%s%d) %s is enabled via %s.conf.",
			    pathname,
			    ddi_driver_name(pptr->port_dip),
			    ddi_get_instance(pptr->port_dip),
			    MANUAL_CFG_ONLY,
			    ddi_driver_name(pptr->port_dip));
			fcp_enable_auto_configuration = 0;
			kmem_free(pathname, MAXPATHLEN);
		}
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_link_cnt = 1;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_id = s_id;
	pptr->port_instance = instance;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
	pptr->port_state = FCP_STATE_INIT;
	if (pinfo->port_acc_attr == NULL) {
		/*
		 * The corresponding FCA doesn't support DMA at all
		 */
		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
	}

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));

	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * If FCA supports DMA in SCSI data phase, we need preallocate
		 * dma cookie, so stash the cookie size
		 */
		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
		    pptr->port_data_dma_attr.dma_attr_sgllen;
	}

	/*
	 * The two mutexes of fcp_port are initialized. The variable
	 * mutex_initted is incremented to remember that fact. That variable
	 * is checked when the routine fails and the mutexes have to be
	 * destroyed.
	 */
	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * The SCSI tran structure is allocated and initialized now.
	 */
	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
		goto fail;
	}

	/* link in the transport structure then fill it in */
	pptr->port_tran = tran;
	tran->tran_hba_private = pptr;
	tran->tran_tgt_init = fcp_scsi_tgt_init;
	tran->tran_tgt_probe = NULL;
	tran->tran_tgt_free = fcp_scsi_tgt_free;
	tran->tran_start = fcp_scsi_start;
	tran->tran_reset = fcp_scsi_reset;
	tran->tran_abort = fcp_scsi_abort;
	tran->tran_getcap = fcp_scsi_getcap;
	tran->tran_setcap = fcp_scsi_setcap;
	tran->tran_init_pkt = NULL;
	tran->tran_destroy_pkt = NULL;
	tran->tran_dmafree = NULL;
	tran->tran_sync_pkt = NULL;
	tran->tran_reset_notify = fcp_scsi_reset_notify;
	tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
	tran->tran_get_name = fcp_scsi_get_name;
	tran->tran_clear_aca = NULL;
	tran->tran_clear_task_set = NULL;
	tran->tran_terminate_task = NULL;
	tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
	tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
	tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
	tran->tran_post_event = fcp_scsi_bus_post_event;
	tran->tran_quiesce = NULL;
	tran->tran_unquiesce = NULL;
	tran->tran_bus_reset = NULL;
	tran->tran_bus_config = fcp_scsi_bus_config;
	tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
	tran->tran_bus_power = NULL;
	tran->tran_interconnect_type = INTERCONNECT_FABRIC;

	tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
	tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
	tran->tran_setup_pkt = fcp_pkt_setup;
	tran->tran_teardown_pkt = fcp_pkt_teardown;
	tran->tran_hba_len = pptr->port_priv_pkt_len +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
		/*
		 * If FCA don't support DMA, then we use different vectors to
		 * minimize the effects on DMA code flow path
		 */
		tran->tran_start = fcp_pseudo_start;
		tran->tran_init_pkt = fcp_pseudo_init_pkt;
		tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
		tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
		tran->tran_dmafree = fcp_pseudo_dmafree;
		tran->tran_setup_pkt = NULL;
		tran->tran_teardown_pkt = NULL;
		tran->tran_pkt_constructor = NULL;
		tran->tran_pkt_destructor = NULL;
		pptr->port_data_dma_attr = pseudo_fca_dma_attr;
	}

	/*
	 * Allocate an ndi event handle
	 */
	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);

	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
	    sizeof (fcp_ndi_event_defs));

	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
	    &pptr->port_ndi_event_hdl, NDI_SLEEP);

	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;

	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
		goto fail;
	}
	event_bind++;	/* Checked in fail case */

	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
	    != DDI_SUCCESS) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_attach_setup failed", instance);
		goto fail;
	}
	hba_attached++;	/* Checked in fail case */

	pptr->port_mpxio = 0;
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
	    MDI_SUCCESS) {
		pptr->port_mpxio++;
	}

	/*
	 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it start the
	 * fcp_watchdog_tick.
	 *
	 * Why put this new port in the global before we are done attaching it?
	 * We are actually making the structure globally known before we are
	 * done attaching it. The reason for that is: because of the code that
	 * follows. At this point the resources to handle the port are
	 * allocated. This function is now going to do the following:
	 *
	 * 1) It is going to try to register with the name server advertising
	 *    the new FCP capability of the port.
	 * 2) It is going to play the role of the fp/fctl layer by building
	 *    a list of worldwide names reachable through this port and call
	 *    itself on fcp_statec_callback().  That requires the port to
	 *    be part of the global list.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_port_head == NULL) {
		/* first port ever: load the LUN blacklist once */
		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
	}
	pptr->port_next = fcp_port_head;
	fcp_port_head = pptr;
	soft_state_linked++;

	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL,
		    fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Here an attempt is made to register with the name server, the new
	 * FCP capability. That is done using an RFT_ID to the name server.
	 * It is done synchronously. The function fcp_do_ns_registry()
	 * doesn't return till the name server responded.
	 * On failures, just ignore it for now and it will get retried during
	 * state change callbacks. We'll set a flag to show this failure
	 */
	if (fcp_do_ns_registry(pptr, s_id)) {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
		mutex_exit(&pptr->port_mutex);
	} else {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Lookup for boot WWN property
	 */
	if (modrootloaded != 1) {
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
		    ddi_get_parent(pinfo->port_dip),
		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
		    (nbytes == FC_WWN_SIZE)) {
			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
		}
		if (boot_wwn) {
			ddi_prop_free(boot_wwn);
		}
	}

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * we're attaching a port where the link is offline
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 *
		 * in the mean time, do not do anything
		 */
		res = DDI_SUCCESS;
		pptr->port_state |= FCP_STATE_OFFLINE;
		break;

	case FC_STATE_ONLINE: {
		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		/*
		 * discover devices and create nodes (a private
		 * loop or point-to-point)
		 */
		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

		/*
		 * At this point we are going to build a list of all the ports
		 * that can be reached through this local port. It looks like
		 * we cannot handle more than FCP_MAX_DEVICES per local port
		 * (128).
		 */
		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
		    KM_NOSLEEP)) == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate portmap",
			    instance);
			goto fail;
		}

		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated.  The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that is the case the request is
		 * synchronous.  This means we are stuck here till the name
		 * server replies.  A lot of things can change during that time
		 * and including, may be, being called on
		 * fcp_statec_callback() for different reasons. I'm not sure
		 * the code can handle that.
		 */
		max_cnt = FCP_MAX_DEVICES;
		alloc_cnt = FCP_MAX_DEVICES;
		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
		    FC_SUCCESS) {
			caddr_t msg;

			(void) fc_ulp_error(res, &msg);

			/*
			 * this just means the transport is
			 * busy perhaps building a portmap so,
			 * for now, succeed this port attach
			 * when the transport has a new map,
			 * it'll send us a state change then
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!failed to get port map : %s", msg);

			res = DDI_SUCCESS;
			break;	/* go return result */
		}
		if (max_cnt > alloc_cnt) {
			alloc_cnt = max_cnt;
		}

		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the enumera-
		 * tion process.
		 */
		/*
		 * let the state change callback do the SCSI device
		 * discovery and create the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;
	}

	default:
		/* unknown port state */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);

		res = DDI_SUCCESS;
		break;
	}

	/* free temp list if used */
	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* note the attach time */
	pptr->port_attach_time = ddi_get_lbolt64();

	/* all done */
	return (res);

	/* a failure we have to clean up after */
fail:
	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");

	if (soft_state_linked) {
		/* remove this fcp_port from the linked list */
		(void) fcp_soft_state_unlink(pptr);
	}

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		if (event_bind) {
			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
			    &pptr->port_ndi_events, NDI_SLEEP);
		}
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* undo SCSI HBA setup */
	if (hba_attached) {
		(void) scsi_hba_detach(pptr->port_dip);
	}
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init. Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t	tid = fcp_watchdog_id;

			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Handles a detach, suspend or power-down request for a
 *		port.  For suspend/power-down, simply records the state
 *		flag if the port is already quiesced.  Otherwise waits
 *		(bounded by FCP_ICMD_DEADLINE) for in-flight reconfig and
 *		internal packets to drain, marks the port offline and,
 *		for a real detach, unlinks and frees the port.
 *
 * Argument:	pptr		Port to detach/suspend.
 *		flag		FCP_STATE_DETACHING, or a suspend/
 *				power-down state flag.
 *		instance	Soft-state instance number of the port.
 *
 * Return Value: FC_SUCCESS
 *		 FC_FAILURE	Port is busy (in MDI, or didn't drain).
 *
 * Context:	Kernel context.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		mutex_exit(&pptr->port_mutex);
		ddi_sleep(1);
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases all resources held by a port being detached:
 *		ndi event set/handle, targets and luns, mpxio phci
 *		registration, the SCSA HBA attachment and tran structure,
 *		the port mutexes, and finally the soft state itself.
 *
 * Argument:	pptr		Port to clean up (must be unlinked from
 *				the global port list by the caller).
 *		instance	Soft-state instance number of the port.
 *
 * Return Value: None
 *
 * Context:	Kernel context.
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10382 * Function: fcp_kmem_cache_constructor
10384 * Description: This function allocates and initializes the resources required
 * to build a scsi_pkt structure for the target driver. The result
10386 * of the allocation and initialization will be cached in the
10387 * memory cache. As DMA resources may be allocated here, that
10388 * means DMA resources will be tied up in the cache manager.
10389 * This is a tradeoff that has been made for performance reasons.
10391 * Argument: *buf Memory to preinitialize.
10392 * *arg FCP port structure (fcp_port).
10393 * kmflags Value passed to kmem_cache_alloc() and
10394 * propagated to the constructor.
10396 * Return Value: 0 Allocation/Initialization was successful.
10397 * -1 Allocation or Initialization failed.
10400 * If the returned value is 0, the buffer is initialized like this:
10402 * +================================+
10403 * +----> | struct scsi_pkt |
10404 * | | |
10405 * | +--- | pkt_ha_private |
10406 * | | | |
10407 * | | +================================+
10408 * | |
10409 * | | +================================+
10410 * | +--> | struct fcp_pkt | <---------+
10411 * | | | |
10412 * +----- | cmd_pkt | |
10413 * | cmd_fp_pkt | ---+ |
10414 * +-------->| cmd_fcp_rsp[] | | |
10415 * | +--->| cmd_fcp_cmd[] | | |
10416 * | | |--------------------------------| | |
10417 * | | | struct fc_packet | <--+ |
10418 * | | | | |
10419 * | | | pkt_ulp_private | ----------+
10420 * | | | pkt_fca_private | -----+
10421 * | | | pkt_data_cookie | ---+ |
10422 * | | | pkt_cmdlen | | |
10423 * | |(a) | pkt_rsplen | | |
10424 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10425 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10426 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10427 * | pkt_resp_cookie | ---|-|--+ | | |
10428 * | pkt_cmd_dma | | | | | | |
10429 * | pkt_cmd_acc | | | | | | |
10430 * +================================+ | | | | | |
10431 * | dma_cookies | <--+ | | | | |
10432 * | | | | | | |
10433 * +================================+ | | | | |
10434 * | fca_private | <----+ | | | |
10435 * | | | | | |
10436 * +================================+ | | | |
10437 * | | | |
10438 * | | | |
10439 * +================================+ (d) | | | |
10440 * | fcp_resp cookies | <-------+ | | |
10441 * | | | | |
10442 * +================================+ | | |
10443 * | | |
10444 * +================================+ (d) | | |
10445 * | fcp_resp | <-----------+ | |
10446 * | (DMA resources associated) | | |
10447 * +================================+ | |
10448 * | |
10449 * | |
10450 * | |
10451 * +================================+ (c) | |
10452 * | fcp_cmd cookies | <---------------+ |
10453 * | | |
10454 * +================================+ |
10456 * +================================+ (c) |
10457 * | fcp_cmd | <--------------------+
10458 * | (DMA resources associated) |
10459 * +================================+
10461 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10462 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10463 * (c) Only if DMA is used for the FCP_CMD buffer.
10464 * (d) Only if DMA is used for the FCP_RESP buffer
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt *cmd;
	struct fcp_port *pptr;
	fc_packet_t *fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	bzero(cmd, tran->tran_hba_len);

	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	/* cross-link the SCSA, FCP and FC layers of the packet */
	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/* FCA-private area lives past the fcp_pkt and the cookie array */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* pre-allocated DMA cookie array sits directly after fcp_pkt */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp.  The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate.  FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer.  A buffer is allocated for each one the ddi_dma_*
		 * interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10521 * Function: fcp_kmem_cache_destructor
10523 * Description: Called by the destructor of the cache managed by SCSA.
10524 * All the resources pre-allocated in fcp_pkt_constructor
10525 * and the data also pre-initialized in fcp_pkt_constructor
10526 * are freed and uninitialized here.
10528 * Argument: *buf Memory to uninitialize.
10529 * *arg FCP port structure (fcp_port).
10531 * Return Value: None
10533 * Context: kernel
10535 static void
10536 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10538 struct fcp_pkt *cmd;
10539 struct fcp_port *pptr;
10541 pptr = (struct fcp_port *)(tran->tran_hba_private);
10542 cmd = pkt->pkt_ha_private;
10544 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10546 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10547 * buffer and DMA resources allocated to do so are released.
10549 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10554 * Function: fcp_alloc_cmd_resp
10556 * Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10557 * will be DMAed by the HBA. The buffer is allocated applying
10558 * the DMA requirements for the HBA. The buffers allocated will
10559 * also be bound. DMA resources are allocated in the process.
10560 * They will be released by fcp_free_cmd_resp().
10562 * Argument: *pptr FCP port.
10563 * *fpkt fc packet for which the cmd and resp packet should be
10564 * allocated.
10565 * flags Allocation flags.
10567 * Return Value: FC_FAILURE
10568 * FC_SUCCESS
10570 * Context: User or Kernel context only if flags == KM_SLEEP.
10571 * Interrupt context if the KM_SLEEP is not specified.
10573 static int
10574 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10576 int rval;
10577 int cmd_len;
10578 int resp_len;
10579 ulong_t real_len;
10580 int (*cb) (caddr_t);
10581 ddi_dma_cookie_t pkt_cookie;
10582 ddi_dma_cookie_t *cp;
10583 uint32_t cnt;
10585 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10587 cmd_len = fpkt->pkt_cmdlen;
10588 resp_len = fpkt->pkt_rsplen;
10590 ASSERT(fpkt->pkt_cmd_dma == NULL);
10592 /* Allocation of a DMA handle used in subsequent calls. */
10593 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10594 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10595 return (FC_FAILURE);
10598 /* A buffer is allocated that satisfies the DMA requirements. */
10599 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10600 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10601 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10603 if (rval != DDI_SUCCESS) {
10604 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10605 return (FC_FAILURE);
10608 if (real_len < cmd_len) {
10609 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10610 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10611 return (FC_FAILURE);
10614 /* The buffer allocated is DMA bound. */
10615 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10616 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10617 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10619 if (rval != DDI_DMA_MAPPED) {
10620 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10621 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10622 return (FC_FAILURE);
10625 if (fpkt->pkt_cmd_cookie_cnt >
10626 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10627 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10628 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10629 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10630 return (FC_FAILURE);
10633 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10636 * The buffer where the scatter/gather list is going to be built is
10637 * allocated.
10639 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10640 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10641 KM_NOSLEEP);
10643 if (cp == NULL) {
10644 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10645 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10646 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10647 return (FC_FAILURE);
10651 * The scatter/gather list for the buffer we just allocated is built
10652 * here.
10654 *cp = pkt_cookie;
10655 cp++;
10657 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10658 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10659 &pkt_cookie);
10660 *cp = pkt_cookie;
10663 ASSERT(fpkt->pkt_resp_dma == NULL);
10664 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10665 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10666 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10667 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10668 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10669 return (FC_FAILURE);
10672 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10673 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10674 (caddr_t *)&fpkt->pkt_resp, &real_len,
10675 &fpkt->pkt_resp_acc);
10677 if (rval != DDI_SUCCESS) {
10678 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10679 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10680 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10681 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10682 kmem_free(fpkt->pkt_cmd_cookie,
10683 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10684 return (FC_FAILURE);
10687 if (real_len < resp_len) {
10688 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10689 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10690 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10691 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10692 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10693 kmem_free(fpkt->pkt_cmd_cookie,
10694 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10695 return (FC_FAILURE);
10698 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10699 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10700 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10702 if (rval != DDI_DMA_MAPPED) {
10703 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10704 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10705 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10706 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10707 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10708 kmem_free(fpkt->pkt_cmd_cookie,
10709 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10710 return (FC_FAILURE);
10713 if (fpkt->pkt_resp_cookie_cnt >
10714 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10715 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10716 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10717 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10718 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10719 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10720 kmem_free(fpkt->pkt_cmd_cookie,
10721 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10722 return (FC_FAILURE);
10725 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10727 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10728 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10729 KM_NOSLEEP);
10731 if (cp == NULL) {
10732 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10733 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10734 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10735 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10736 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10737 kmem_free(fpkt->pkt_cmd_cookie,
10738 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10739 return (FC_FAILURE);
10742 *cp = pkt_cookie;
10743 cp++;
10745 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10746 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10747 &pkt_cookie);
10748 *cp = pkt_cookie;
10751 return (FC_SUCCESS);
/*
 * Function: fcp_free_cmd_resp
 *
 * Description: This function releases the FCP_CMD and FCP_RESP buffers
 *		allocated by fcp_alloc_cmd_resp() and all the resources
 *		associated with them. That includes the DMA resources and
 *		the buffer allocated for the cookies of each one of them.
 *
 * Argument:	*pptr		FCP port context.
 *		*fpkt		fc packet containing the cmd and resp packet
 *				to be released.
 *
 * Return Value: None
 *
 * Context: Interrupt, User and Kernel context.
 */
/* ARGSUSED */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	/* Both buffers are expected to have been set up by the allocator. */
	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

	if (fpkt->pkt_resp_dma) {
		/* Unbind before freeing the memory and then the handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
	}

	if (fpkt->pkt_resp_cookie) {
		kmem_free(fpkt->pkt_resp_cookie,
		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
		/* NULL the pointer so a double free is detectable/benign. */
		fpkt->pkt_resp_cookie = NULL;
	}

	if (fpkt->pkt_cmd_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	}

	if (fpkt->pkt_cmd_cookie) {
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_cmd_cookie = NULL;
	}
}
/*
 * Function: fcp_phys_tgt_init
 *
 * Description: Called by the transport (SCSA) to do our own target
 *		initialization for a physical (non-mpxio) child node.
 *		The PORT_WWN and LUN properties of the child are used to
 *		look up the matching fcp_lun, which is then tied to the
 *		scsi_device being initialized.
 *
 *		Can acquire and release the global mutex.
 *
 * Return Value: DDI_SUCCESS
 *		 DDI_NOT_WELL_FORMED	missing/invalid WWN or LUN property.
 *		 DDI_FAILURE		no matching LUN known to this port.
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* the lookup may have allocated even on size mismatch */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	/* 0xFFFF is the "property absent" default, so it means no LUN */
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* lock order: port_mutex is taken before tgt_mutex */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* tie the LUN to the scsi_device and take a tgt_init reference */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
/*
 * Function: fcp_virt_tgt_init
 *
 * Description: mpxio (scsi_vhci) counterpart of fcp_phys_tgt_init: target
 *		initialization for a virtual child, identified by its
 *		pathinfo node. The PORT_WWN and LUN properties are used to
 *		look up the matching fcp_lun, which is then tied to the
 *		scsi_device being initialized.
 *
 * Return Value: DDI_SUCCESS
 *		 DDI_NOT_WELL_FORMED	no pathinfo node, or missing/invalid
 *					WWN or LUN property.
 *		 DDI_FAILURE		no matching LUN known to this port.
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual child must carry a pathinfo node */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* the lookup may have allocated even on size mismatch */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	/* 0xFFFF is the "property absent" default, so it means no LUN */
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* lock order: port_mutex is taken before tgt_mutex */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* tie the LUN to the scsi_device and take a tgt_init reference */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10986 * called by the transport to do our own target initialization
10988 * can acquire and release the global mutex
10990 /* ARGSUSED */
10991 static int
10992 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10993 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10995 struct fcp_port *pptr = (struct fcp_port *)
10996 hba_tran->tran_hba_private;
10997 int rval;
10999 ASSERT(pptr != NULL);
11002 * Child node is getting initialized. Look at the mpxio component
11003 * type on the child device to see if this device is mpxio managed
11004 * or not.
11006 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11007 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11008 } else {
11009 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11012 return (rval);
/*
 * Called by the transport to undo fcp_scsi_tgt_init: drops the LUN's
 * tgt_init reference and detaches the scsi_device from the fcp_lun.
 */
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt	*ptgt;

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* nothing was attached by tgt_init, nothing to free */
	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	/* drop the reference taken in fcp_{phys,virt}_tgt_init */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
/*
 * Function: fcp_scsi_start
 *
 * Description: This function is called by the target driver to request a
 *		command to be sent.
 *
 * Argument:	*ap		SCSI address of the device.
 *		*pkt		SCSI packet containing the cmd to send.
 *
 * Return Value: TRAN_ACCEPT
 *		 TRAN_BUSY
 *		 TRAN_BADPKT
 *		 TRAN_FATAL_ERROR
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_pkt	*cmd = PKT2CMD(pkt);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* a NULL pd means the remote port itself is gone */
		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended. There is hardware such as
	 * the v880 that doesn't do PM. Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing. So, if we can queue the packet, do it. Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set. If one of them is set the value
		 * returned will be TRAN_BUSY. If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* go queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	/* a zero pkt_time means "no timeout"; use a 5 hour FC timeout */
	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				/* retry later via the internal queue */
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
11264 * called by the transport to abort a packet
11266 /*ARGSUSED*/
11267 static int
11268 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11270 int tgt_cnt;
11271 struct fcp_port *pptr = ADDR2FCP(ap);
11272 struct fcp_lun *plun = ADDR2LUN(ap);
11273 struct fcp_tgt *ptgt = plun->lun_tgt;
11275 if (pkt == NULL) {
11276 if (ptgt) {
11277 mutex_enter(&ptgt->tgt_mutex);
11278 tgt_cnt = ptgt->tgt_change_cnt;
11279 mutex_exit(&ptgt->tgt_mutex);
11280 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11281 return (TRUE);
11284 return (FALSE);
/*
 * Perform reset. Returns 1 on success, 0 on failure, following the
 * scsi_reset(9F) convention.
 */
static int
fcp_scsi_reset(struct scsi_address *ap, int level)
{
	int		rval = 0;
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	if (level == RESET_ALL) {
		/* RESET_ALL maps to a link reset on the FC side */
		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
			rval = 1;
		}
	} else if (level == RESET_TARGET || level == RESET_LUN) {
		/*
		 * If we are in the middle of discovery, return
		 * SUCCESS as this target will be rediscovered
		 * anyway
		 */
		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
			mutex_exit(&ptgt->tgt_mutex);
			return (1);
		}
		mutex_exit(&ptgt->tgt_mutex);

		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
			rval = 1;
		}
	}
	return (rval);
}
/*
 * called by the framework to get a SCSI capability
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	/* delegate to the common get/set capability handler (get mode) */
	return (fcp_commoncap(ap, cap, 0, whom, 0));
}
/*
 * called by the framework to set a SCSI capability
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	/* delegate to the common get/set capability handler (set mode) */
	return (fcp_commoncap(ap, cap, value, whom, 1));
}
/*
 * Function: fcp_pkt_setup
 *
 * Description: This function sets up the scsi_pkt structure passed by the
 *		caller. This function assumes fcp_pkt_constructor has been
 *		called previously for the packet passed by the caller. If
 *		successful this call will have the following results:
 *
 *		- The resources needed that will be constant through out
 *		  the whole transaction are allocated.
 *		- The fields that will be constant through out the whole
 *		  transaction are initialized.
 *		- The scsi packet will be linked to the LUN structure
 *		  addressed by the transaction.
 *
 * Argument:
 *		*pkt		Pointer to a scsi_pkt structure.
 *		callback
 *		arg
 *
 * Return Value: 0	Success
 *		 !0	Failure
 *
 * Context: Kernel context or interrupt context
 */
/* ARGSUSED */
static int
fcp_pkt_setup(struct scsi_pkt *pkt,
    int (*callback)(caddr_t arg),
    caddr_t arg)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	int		kf;
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;

	pptr = ADDR2FCP(&pkt->pkt_address);
	plun = ADDR2LUN(&pkt->pkt_address);
	ptgt = plun->lun_tgt;

	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	fpkt = cmd->cmd_fp_pkt;

	/*
	 * First step of fcp_scsi_init_pkt: pkt allocation.
	 * We determine if the caller is willing to wait for the
	 * resources.
	 */
	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;

	/*
	 * Selective zeroing of the pkt.
	 */
	cmd->cmd_back = NULL;
	cmd->cmd_next = NULL;

	/*
	 * Zero out fcp command
	 */
	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));

	cmd->cmd_state = FCP_PKT_IDLE;

	fpkt = cmd->cmd_fp_pkt;
	fpkt->pkt_data_acc = NULL;

	/*
	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
	 * could be destroyed. We need to fail pkt_setup.
	 */
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		return (-1);
	}

	/* tgt_pd_handle is sampled and used under tgt_mutex */
	mutex_enter(&ptgt->tgt_mutex);
	fpkt->pkt_pd = ptgt->tgt_pd_handle;

	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
	    != FC_SUCCESS) {
		mutex_exit(&ptgt->tgt_mutex);
		return (-1);
	}

	mutex_exit(&ptgt->tgt_mutex);

	/* Fill in the Fibre Channel Header */
	hp = &fpkt->pkt_cmd_fhdr;
	hp->r_ctl = R_CTL_COMMAND;
	hp->rsvd = 0;
	hp->type = FC_TYPE_SCSI_FCP;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;
	hp->rx_id = 0xffff;
	hp->ro = 0;

	/*
	 * A doubly linked list (cmd_forw, cmd_back) is built
	 * out of every allocated packet on a per-lun basis
	 *
	 * The packets are maintained in the list so as to satisfy
	 * scsi_abort() requests. At present (which is unlikely to
	 * change in the future) nobody performs a real scsi_abort
	 * in the SCSI target drivers (as they don't keep the packets
	 * after doing scsi_transport - so they don't know how to
	 * abort a packet other than sending a NULL to abort all
	 * outstanding packets)
	 */
	mutex_enter(&plun->lun_mutex);
	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
		plun->lun_pkt_head->cmd_back = cmd;
	} else {
		plun->lun_pkt_tail = cmd;
	}
	plun->lun_pkt_head = cmd;

	mutex_exit(&plun->lun_mutex);
	return (0);
}
/*
 * Function: fcp_pkt_teardown
 *
 * Description: This function releases a scsi_pkt structure and all the
 *		resources attached to it: the packet is unlinked from the
 *		per-LUN list built by fcp_pkt_setup() and the fc packet is
 *		uninitialized with the transport.
 *
 * Argument:	*pkt		Pointer to a scsi_pkt structure.
 *
 * Return Value: None
 *
 * Context: User, Kernel or Interrupt context.
 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

	/*
	 * Remove the packet from the per-lun list
	 */
	mutex_enter(&plun->lun_mutex);
	if (cmd->cmd_back) {
		ASSERT(cmd != plun->lun_pkt_head);
		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	} else {
		/* no predecessor: cmd must be the list head */
		ASSERT(cmd == plun->lun_pkt_head);
		plun->lun_pkt_head = cmd->cmd_forw;
	}

	if (cmd->cmd_forw) {
		cmd->cmd_forw->cmd_back = cmd->cmd_back;
	} else {
		/* no successor: cmd must be the list tail */
		ASSERT(cmd == plun->lun_pkt_tail);
		plun->lun_pkt_tail = cmd->cmd_back;
	}

	mutex_exit(&plun->lun_mutex);

	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}
/*
 * Routine for reset notification setup, to register or cancel.
 * This function is called by SCSA.
 */
/*ARGSUSED*/
static int
fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct fcp_port *pptr = ADDR2FCP(ap);

	/* the generic SCSA helper manages the listener list for us */
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
}
11529 static int
11530 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11531 ddi_eventcookie_t *event_cookiep)
11533 struct fcp_port *pptr = fcp_dip2port(dip);
11535 if (pptr == NULL) {
11536 return (DDI_FAILURE);
11539 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11540 event_cookiep, NDI_EVENT_NOPASS));
11544 static int
11545 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11546 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11547 ddi_callback_id_t *cb_id)
11549 struct fcp_port *pptr = fcp_dip2port(dip);
11551 if (pptr == NULL) {
11552 return (DDI_FAILURE);
11555 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11556 eventid, callback, arg, NDI_SLEEP, cb_id));
11560 static int
11561 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11564 struct fcp_port *pptr = fcp_dip2port(dip);
11566 if (pptr == NULL) {
11567 return (DDI_FAILURE);
11569 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11574 * called by the transport to post an event
11576 static int
11577 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11578 ddi_eventcookie_t eventid, void *impldata)
11580 struct fcp_port *pptr = fcp_dip2port(dip);
11582 if (pptr == NULL) {
11583 return (DDI_FAILURE);
11586 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11587 eventid, impldata));
 * A target in many cases in Fibre Channel has a one to one relation
11593 * with a port identifier (which is also known as D_ID and also as AL_PA
11594 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11595 * will most likely result in resetting all LUNs (which means a reset will
11596 * occur on all the SCSI devices connected at the other end of the bridge)
11597 * That is the latest favorite topic for discussion, for, one can debate as
11598 * hot as one likes and come up with arguably a best solution to one's
11599 * satisfaction
11601 * To stay on track and not digress much, here are the problems stated
11602 * briefly:
11604 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11605 * target drivers use RESET_TARGET even if their instance is on a
11606 * LUN. Doesn't that sound a bit broken ?
11608 * FCP SCSI (the current spec) only defines RESET TARGET in the
11609 * control fields of an FCP_CMND structure. It should have been
11610 * fixed right there, giving flexibility to the initiators to
11611 * minimize havoc that could be caused by resetting a target.
11613 static int
11614 fcp_reset_target(struct scsi_address *ap, int level)
11616 int rval = FC_FAILURE;
11617 char lun_id[25];
11618 struct fcp_port *pptr = ADDR2FCP(ap);
11619 struct fcp_lun *plun = ADDR2LUN(ap);
11620 struct fcp_tgt *ptgt = plun->lun_tgt;
11621 struct scsi_pkt *pkt;
11622 struct fcp_pkt *cmd;
11623 struct fcp_rsp *rsp;
11624 uint32_t tgt_cnt;
11625 struct fcp_rsp_info *rsp_info;
11626 struct fcp_reset_elem *p;
11627 int bval;
11629 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11630 KM_NOSLEEP)) == NULL) {
11631 return (rval);
11634 mutex_enter(&ptgt->tgt_mutex);
11635 if (level == RESET_TARGET) {
11636 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11637 mutex_exit(&ptgt->tgt_mutex);
11638 kmem_free(p, sizeof (struct fcp_reset_elem));
11639 return (rval);
11641 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11642 (void) strcpy(lun_id, " ");
11643 } else {
11644 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11645 mutex_exit(&ptgt->tgt_mutex);
11646 kmem_free(p, sizeof (struct fcp_reset_elem));
11647 return (rval);
11649 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11651 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11653 tgt_cnt = ptgt->tgt_change_cnt;
11655 mutex_exit(&ptgt->tgt_mutex);
11657 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11658 0, 0, NULL, 0)) == NULL) {
11659 kmem_free(p, sizeof (struct fcp_reset_elem));
11660 mutex_enter(&ptgt->tgt_mutex);
11661 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11662 mutex_exit(&ptgt->tgt_mutex);
11663 return (rval);
11665 pkt->pkt_time = FCP_POLL_TIMEOUT;
11667 /* fill in cmd part of packet */
11668 cmd = PKT2CMD(pkt);
11669 if (level == RESET_TARGET) {
11670 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11671 } else {
11672 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11674 cmd->cmd_fp_pkt->pkt_comp = NULL;
11675 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11677 /* prepare a packet for transport */
11678 fcp_prepare_pkt(pptr, cmd, plun);
11680 if (cmd->cmd_pkt->pkt_time) {
11681 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11682 } else {
11683 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11686 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11687 bval = fcp_dopoll(pptr, cmd);
11688 fc_ulp_idle_port(pptr->port_fp_handle);
11690 /* submit the packet */
11691 if (bval == TRAN_ACCEPT) {
11692 int error = 3;
11694 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11695 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11696 sizeof (struct fcp_rsp));
11698 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11699 if (fcp_validate_fcp_response(rsp, pptr) ==
11700 FC_SUCCESS) {
11701 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11702 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11703 sizeof (struct fcp_rsp), rsp_info,
11704 cmd->cmd_fp_pkt->pkt_resp_acc,
11705 sizeof (struct fcp_rsp_info));
11707 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11708 rval = FC_SUCCESS;
11709 error = 0;
11710 } else {
11711 error = 1;
11713 } else {
11714 error = 2;
11718 switch (error) {
11719 case 0:
11720 fcp_log(CE_WARN, pptr->port_dip,
11721 "!FCP: WWN 0x%08x%08x %s reset successfully",
11722 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11723 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11724 break;
11726 case 1:
11727 fcp_log(CE_WARN, pptr->port_dip,
11728 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11729 " response code=%x",
11730 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11731 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11732 rsp_info->rsp_code);
11733 break;
11735 case 2:
11736 fcp_log(CE_WARN, pptr->port_dip,
11737 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11738 " Bad FCP response values: rsvd1=%x,"
11739 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11740 " rsplen=%x, senselen=%x",
11741 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11742 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11743 rsp->reserved_0, rsp->reserved_1,
11744 rsp->fcp_u.fcp_status.reserved_0,
11745 rsp->fcp_u.fcp_status.reserved_1,
11746 rsp->fcp_response_len, rsp->fcp_sense_len);
11747 break;
11749 default:
11750 fcp_log(CE_WARN, pptr->port_dip,
11751 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11752 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11753 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11754 break;
11757 scsi_destroy_pkt(pkt);
11759 if (rval == FC_FAILURE) {
11760 mutex_enter(&ptgt->tgt_mutex);
11761 if (level == RESET_TARGET) {
11762 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11763 } else {
11764 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11766 mutex_exit(&ptgt->tgt_mutex);
11767 kmem_free(p, sizeof (struct fcp_reset_elem));
11768 return (rval);
11771 mutex_enter(&pptr->port_mutex);
11772 if (level == RESET_TARGET) {
11773 p->tgt = ptgt;
11774 p->lun = NULL;
11775 } else {
11776 p->tgt = NULL;
11777 p->lun = plun;
11779 p->tgt = ptgt;
11780 p->tgt_cnt = tgt_cnt;
11781 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11782 p->next = pptr->port_reset_list;
11783 pptr->port_reset_list = p;
11785 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11786 fcp_trace, FCP_BUF_LEVEL_3, 0,
11787 "Notify ssd of the reset to reinstate the reservations");
11789 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11790 &pptr->port_reset_notify_listf);
11792 mutex_exit(&pptr->port_mutex);
11794 return (rval);
11799 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11800 * SCSI capabilities
11802 /* ARGSUSED */
11803 static int
11804 fcp_commoncap(struct scsi_address *ap, char *cap,
11805 int val, int tgtonly, int doset)
11807 struct fcp_port *pptr = ADDR2FCP(ap);
11808 struct fcp_lun *plun = ADDR2LUN(ap);
11809 struct fcp_tgt *ptgt = plun->lun_tgt;
11810 int cidx;
11811 int rval = FALSE;
11813 if (cap == NULL) {
11814 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11815 fcp_trace, FCP_BUF_LEVEL_3, 0,
11816 "fcp_commoncap: invalid arg");
11817 return (rval);
11820 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11821 return (UNDEFINED);
11825 * Process setcap request.
11827 if (doset) {
11829 * At present, we can only set binary (0/1) values
11831 switch (cidx) {
11832 case SCSI_CAP_ARQ:
11833 if (val == 0) {
11834 rval = FALSE;
11835 } else {
11836 rval = TRUE;
11838 break;
11840 case SCSI_CAP_LUN_RESET:
11841 if (val) {
11842 plun->lun_cap |= FCP_LUN_CAP_RESET;
11843 } else {
11844 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11846 rval = TRUE;
11847 break;
11849 case SCSI_CAP_SECTOR_SIZE:
11850 rval = TRUE;
11851 break;
11852 default:
11853 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11854 fcp_trace, FCP_BUF_LEVEL_4, 0,
11855 "fcp_setcap: unsupported %d", cidx);
11856 rval = UNDEFINED;
11857 break;
11860 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11861 fcp_trace, FCP_BUF_LEVEL_5, 0,
11862 "set cap: cap=%s, val/tgtonly/doset/rval = "
11863 "0x%x/0x%x/0x%x/%d",
11864 cap, val, tgtonly, doset, rval);
11866 } else {
11868 * Process getcap request.
11870 switch (cidx) {
11871 case SCSI_CAP_DMA_MAX:
11872 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11875 * Need to make an adjustment qlc is uint_t 64
11876 * st is int, so we will make the adjustment here
11877 * being as nobody wants to touch this.
11878 * It still leaves the max single block length
11879 * of 2 gig. This should last .
11882 if (rval == -1) {
11883 rval = MAX_INT_DMA;
11886 break;
11888 case SCSI_CAP_INITIATOR_ID:
11889 rval = pptr->port_id;
11890 break;
11892 case SCSI_CAP_ARQ:
11893 case SCSI_CAP_RESET_NOTIFICATION:
11894 case SCSI_CAP_TAGGED_QING:
11895 rval = TRUE;
11896 break;
11898 case SCSI_CAP_SCSI_VERSION:
11899 rval = 3;
11900 break;
11902 case SCSI_CAP_INTERCONNECT_TYPE:
11903 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11904 (ptgt->tgt_hard_addr == 0)) {
11905 rval = INTERCONNECT_FABRIC;
11906 } else {
11907 rval = INTERCONNECT_FIBRE;
11909 break;
11911 case SCSI_CAP_LUN_RESET:
11912 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11913 TRUE : FALSE;
11914 break;
11916 default:
11917 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11918 fcp_trace, FCP_BUF_LEVEL_4, 0,
11919 "fcp_getcap: unsupported %d", cidx);
11920 rval = UNDEFINED;
11921 break;
11924 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11925 fcp_trace, FCP_BUF_LEVEL_8, 0,
11926 "get cap: cap=%s, val/tgtonly/doset/rval = "
11927 "0x%x/0x%x/0x%x/%d",
11928 cap, val, tgtonly, doset, rval);
11931 return (rval);
11935 * called by the transport to get the port-wwn and lun
11936 * properties of this device, and to create a "name" based on them
11938 * these properties don't exist on sun4m
11940 * return 1 for success else return 0
11942 /* ARGSUSED */
11943 static int
11944 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11946 int i;
11947 int *lun;
11948 int numChars;
11949 uint_t nlun;
11950 uint_t count;
11951 uint_t nbytes;
11952 uchar_t *bytes;
11953 uint16_t lun_num;
11954 uint32_t tgt_id;
11955 char **conf_wwn;
11956 char tbuf[(FC_WWN_SIZE << 1) + 1];
11957 uchar_t barray[FC_WWN_SIZE];
11958 dev_info_t *tgt_dip;
11959 struct fcp_tgt *ptgt;
11960 struct fcp_port *pptr;
11961 struct fcp_lun *plun;
11963 ASSERT(sd != NULL);
11964 ASSERT(name != NULL);
11966 tgt_dip = sd->sd_dev;
11967 pptr = ddi_get_soft_state(fcp_softstate,
11968 ddi_get_instance(ddi_get_parent(tgt_dip)));
11969 if (pptr == NULL) {
11970 return (0);
11973 ASSERT(tgt_dip != NULL);
11975 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11976 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11977 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11978 name[0] = '\0';
11979 return (0);
11982 if (nlun == 0) {
11983 ddi_prop_free(lun);
11984 return (0);
11987 lun_num = lun[0];
11988 ddi_prop_free(lun);
11991 * Lookup for .conf WWN property
11993 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11994 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11995 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11996 ASSERT(count >= 1);
11998 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11999 ddi_prop_free(conf_wwn);
12000 mutex_enter(&pptr->port_mutex);
12001 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12002 mutex_exit(&pptr->port_mutex);
12003 return (0);
12005 ptgt = plun->lun_tgt;
12006 mutex_exit(&pptr->port_mutex);
12008 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12009 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12011 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12012 ptgt->tgt_hard_addr != 0) {
12013 tgt_id = (uint32_t)fcp_alpa_to_switch[
12014 ptgt->tgt_hard_addr];
12015 } else {
12016 tgt_id = ptgt->tgt_d_id;
12019 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12020 TARGET_PROP, tgt_id);
12023 /* get the our port-wwn property */
12024 bytes = NULL;
12025 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12026 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12027 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12028 if (bytes != NULL) {
12029 ddi_prop_free(bytes);
12031 return (0);
12034 for (i = 0; i < FC_WWN_SIZE; i++) {
12035 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12038 /* Stick in the address of the form "wWWN,LUN" */
12039 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12041 ASSERT(numChars < len);
12042 if (numChars >= len) {
12043 fcp_log(CE_WARN, pptr->port_dip,
12044 "!fcp_scsi_get_name: "
12045 "name parameter length too small, it needs to be %d",
12046 numChars+1);
12049 ddi_prop_free(bytes);
12051 return (1);
12056 * called by the transport to get the SCSI target id value, returning
12057 * it in "name"
12059 * this isn't needed/used on sun4m
12061 * return 1 for success else return 0
12063 /* ARGSUSED */
12064 static int
12065 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12067 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12068 struct fcp_tgt *ptgt;
12069 int numChars;
12071 if (plun == NULL) {
12072 return (0);
12075 if ((ptgt = plun->lun_tgt) == NULL) {
12076 return (0);
12079 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12081 ASSERT(numChars < len);
12082 if (numChars >= len) {
12083 fcp_log(CE_WARN, NULL,
12084 "!fcp_scsi_get_bus_addr: "
12085 "name parameter length too small, it needs to be %d",
12086 numChars+1);
12089 return (1);
12094 * called internally to reset the link where the specified port lives
12096 static int
12097 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12099 la_wwn_t wwn;
12100 struct fcp_lun *plun;
12101 struct fcp_tgt *ptgt;
12103 /* disable restart of lip if we're suspended */
12104 mutex_enter(&pptr->port_mutex);
12106 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12107 FCP_STATE_POWER_DOWN)) {
12108 mutex_exit(&pptr->port_mutex);
12109 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12110 fcp_trace, FCP_BUF_LEVEL_2, 0,
12111 "fcp_linkreset, fcp%d: link reset "
12112 "disabled due to DDI_SUSPEND",
12113 ddi_get_instance(pptr->port_dip));
12114 return (FC_FAILURE);
12117 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12118 mutex_exit(&pptr->port_mutex);
12119 return (FC_SUCCESS);
12122 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12123 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12126 * If ap == NULL assume local link reset.
12128 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12129 plun = ADDR2LUN(ap);
12130 ptgt = plun->lun_tgt;
12131 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12132 } else {
12133 bzero((caddr_t)&wwn, sizeof (wwn));
12135 mutex_exit(&pptr->port_mutex);
12137 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12142 * called from fcp_port_attach() to resume a port
12143 * return DDI_* success/failure status
12144 * acquires and releases the global mutex
12145 * acquires and releases the port mutex
12147 /*ARGSUSED*/
12149 static int
12150 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12151 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12153 int res = DDI_FAILURE; /* default result */
12154 struct fcp_port *pptr; /* port state ptr */
12155 uint32_t alloc_cnt;
12156 uint32_t max_cnt;
12157 fc_portmap_t *tmp_list = NULL;
12159 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12160 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12161 instance);
12163 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12164 cmn_err(CE_WARN, "fcp: bad soft state");
12165 return (res);
12168 mutex_enter(&pptr->port_mutex);
12169 switch (cmd) {
12170 case FC_CMD_RESUME:
12171 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12172 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12173 break;
12175 case FC_CMD_POWER_UP:
12177 * If the port is DDI_SUSPENded, defer rediscovery
12178 * until DDI_RESUME occurs
12180 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12181 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12182 mutex_exit(&pptr->port_mutex);
12183 return (DDI_SUCCESS);
12185 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12187 pptr->port_id = s_id;
12188 pptr->port_state = FCP_STATE_INIT;
12189 mutex_exit(&pptr->port_mutex);
12192 * Make a copy of ulp_port_info as fctl allocates
12193 * a temp struct.
12195 (void) fcp_cp_pinfo(pptr, pinfo);
12197 mutex_enter(&fcp_global_mutex);
12198 if (fcp_watchdog_init++ == 0) {
12199 fcp_watchdog_tick = fcp_watchdog_timeout *
12200 drv_usectohz(1000000);
12201 fcp_watchdog_id = timeout(fcp_watch,
12202 NULL, fcp_watchdog_tick);
12204 mutex_exit(&fcp_global_mutex);
12207 * Handle various topologies and link states.
12209 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12210 case FC_STATE_OFFLINE:
12212 * Wait for ONLINE, at which time a state
12213 * change will cause a statec_callback
12215 res = DDI_SUCCESS;
12216 break;
12218 case FC_STATE_ONLINE:
12220 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12221 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12222 res = DDI_SUCCESS;
12223 break;
12226 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12227 !fcp_enable_auto_configuration) {
12228 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12229 if (tmp_list == NULL) {
12230 if (!alloc_cnt) {
12231 res = DDI_SUCCESS;
12233 break;
12235 max_cnt = alloc_cnt;
12236 } else {
12237 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12239 alloc_cnt = FCP_MAX_DEVICES;
12241 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12242 (sizeof (fc_portmap_t)) * alloc_cnt,
12243 KM_NOSLEEP)) == NULL) {
12244 fcp_log(CE_WARN, pptr->port_dip,
12245 "!fcp%d: failed to allocate portmap",
12246 instance);
12247 break;
12250 max_cnt = alloc_cnt;
12251 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12252 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12253 FC_SUCCESS) {
12254 caddr_t msg;
12256 (void) fc_ulp_error(res, &msg);
12258 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12259 fcp_trace, FCP_BUF_LEVEL_2, 0,
12260 "resume failed getportmap: reason=0x%x",
12261 res);
12263 fcp_log(CE_WARN, pptr->port_dip,
12264 "!failed to get port map : %s", msg);
12265 break;
12267 if (max_cnt > alloc_cnt) {
12268 alloc_cnt = max_cnt;
12273 * do the SCSI device discovery and create
12274 * the devinfos
12276 fcp_statec_callback(ulph, pptr->port_fp_handle,
12277 pptr->port_phys_state, pptr->port_topology, tmp_list,
12278 max_cnt, pptr->port_id);
12280 res = DDI_SUCCESS;
12281 break;
12283 default:
12284 fcp_log(CE_WARN, pptr->port_dip,
12285 "!fcp%d: invalid port state at attach=0x%x",
12286 instance, pptr->port_phys_state);
12288 mutex_enter(&pptr->port_mutex);
12289 pptr->port_phys_state = FCP_STATE_OFFLINE;
12290 mutex_exit(&pptr->port_mutex);
12291 res = DDI_SUCCESS;
12293 break;
12296 if (tmp_list != NULL) {
12297 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12300 return (res);
12304 static void
12305 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12307 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12308 pptr->port_dip = pinfo->port_dip;
12309 pptr->port_fp_handle = pinfo->port_handle;
12310 if (pinfo->port_acc_attr != NULL) {
12312 * FCA supports DMA
12314 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12315 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12316 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12317 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12319 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12320 pptr->port_max_exch = pinfo->port_fca_max_exch;
12321 pptr->port_phys_state = pinfo->port_state;
12322 pptr->port_topology = pinfo->port_flags;
12323 pptr->port_reset_action = pinfo->port_reset_action;
12324 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12325 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12326 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12327 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12329 /* Clear FMA caps to avoid fm-capability ereport */
12330 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12331 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12332 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12333 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12334 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12335 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12339 * If the elements wait field is set to 1 then
12340 * another thread is waiting for the operation to complete. Once
12341 * it is complete, the waiting thread is signaled and the element is
12342 * freed by the waiting thread. If the elements wait field is set to 0
12343 * the element is freed.
12345 static void
12346 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12348 ASSERT(elem != NULL);
12349 mutex_enter(&elem->mutex);
12350 elem->result = result;
12351 if (elem->wait) {
12352 elem->wait = 0;
12353 cv_signal(&elem->cv);
12354 mutex_exit(&elem->mutex);
12355 } else {
12356 mutex_exit(&elem->mutex);
12357 cv_destroy(&elem->cv);
12358 mutex_destroy(&elem->mutex);
12359 kmem_free(elem, sizeof (struct fcp_hp_elem));
12364 * This function is invoked from the taskq thread to allocate
12365 * devinfo nodes and to online/offline them.
12367 static void
12368 fcp_hp_task(void *arg)
12370 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12371 struct fcp_lun *plun = elem->lun;
12372 struct fcp_port *pptr = elem->port;
12373 int result;
12375 ASSERT(elem->what == FCP_ONLINE ||
12376 elem->what == FCP_OFFLINE ||
12377 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12378 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12380 mutex_enter(&pptr->port_mutex);
12381 mutex_enter(&plun->lun_mutex);
12382 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12383 plun->lun_event_count != elem->event_cnt) ||
12384 pptr->port_state & (FCP_STATE_SUSPENDED |
12385 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12386 mutex_exit(&plun->lun_mutex);
12387 mutex_exit(&pptr->port_mutex);
12388 fcp_process_elem(elem, NDI_FAILURE);
12389 return;
12391 mutex_exit(&plun->lun_mutex);
12392 mutex_exit(&pptr->port_mutex);
12394 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12395 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12396 fcp_process_elem(elem, result);
12400 static child_info_t *
12401 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12402 int tcount)
12404 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12406 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12407 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12409 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12411 * Child has not been created yet. Create the child device
12412 * based on the per-Lun flags.
12414 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12415 plun->lun_cip =
12416 CIP(fcp_create_dip(plun, lcount, tcount));
12417 plun->lun_mpxio = 0;
12418 } else {
12419 plun->lun_cip =
12420 CIP(fcp_create_pip(plun, lcount, tcount));
12421 plun->lun_mpxio = 1;
12423 } else {
12424 plun->lun_cip = cip;
12427 return (plun->lun_cip);
12431 static int
12432 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12434 int rval = FC_FAILURE;
12435 dev_info_t *pdip;
12436 struct dev_info *dip;
12437 int circular;
12439 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12441 pdip = plun->lun_tgt->tgt_port->port_dip;
12443 if (plun->lun_cip == NULL) {
12444 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12445 fcp_trace, FCP_BUF_LEVEL_3, 0,
12446 "fcp_is_dip_present: plun->lun_cip is NULL: "
12447 "plun: %p lun state: %x num: %d target state: %x",
12448 plun, plun->lun_state, plun->lun_num,
12449 plun->lun_tgt->tgt_port->port_state);
12450 return (rval);
12452 ndi_devi_enter(pdip, &circular);
12453 dip = DEVI(pdip)->devi_child;
12454 while (dip) {
12455 if (dip == DEVI(cdip)) {
12456 rval = FC_SUCCESS;
12457 break;
12459 dip = dip->devi_sibling;
12461 ndi_devi_exit(pdip, circular);
12462 return (rval);
12465 static int
12466 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12468 int rval = FC_FAILURE;
12470 ASSERT(plun != NULL);
12471 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12473 if (plun->lun_mpxio == 0) {
12474 rval = fcp_is_dip_present(plun, DIP(cip));
12475 } else {
12476 rval = fcp_is_pip_present(plun, PIP(cip));
12479 return (rval);
12483 * Function: fcp_create_dip
12485 * Description: Creates a dev_info_t structure for the LUN specified by the
12486 * caller.
12488 * Argument: plun Lun structure
12489 * link_cnt Link state count.
12490 * tgt_cnt Target state change count.
12492 * Return Value: NULL if it failed
12493 * dev_info_t structure address if it succeeded
12495 * Context: Kernel context
12497 static dev_info_t *
12498 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12500 int failure = 0;
12501 uint32_t tgt_id;
12502 uint64_t sam_lun;
12503 struct fcp_tgt *ptgt = plun->lun_tgt;
12504 struct fcp_port *pptr = ptgt->tgt_port;
12505 dev_info_t *pdip = pptr->port_dip;
12506 dev_info_t *cdip = NULL;
12507 dev_info_t *old_dip = DIP(plun->lun_cip);
12508 char *nname = NULL;
12509 char **compatible = NULL;
12510 int ncompatible;
12511 char *scsi_binding_set;
12512 char t_pwwn[17];
12514 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12515 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12517 /* get the 'scsi-binding-set' property */
12518 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12519 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12520 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12521 scsi_binding_set = NULL;
12524 /* determine the node name and compatible */
12525 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12526 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12527 if (scsi_binding_set) {
12528 ddi_prop_free(scsi_binding_set);
12531 if (nname == NULL) {
12532 #ifdef DEBUG
12533 cmn_err(CE_WARN, "%s%d: no driver for "
12534 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12535 " compatible: %s",
12536 ddi_driver_name(pdip), ddi_get_instance(pdip),
12537 ptgt->tgt_port_wwn.raw_wwn[0],
12538 ptgt->tgt_port_wwn.raw_wwn[1],
12539 ptgt->tgt_port_wwn.raw_wwn[2],
12540 ptgt->tgt_port_wwn.raw_wwn[3],
12541 ptgt->tgt_port_wwn.raw_wwn[4],
12542 ptgt->tgt_port_wwn.raw_wwn[5],
12543 ptgt->tgt_port_wwn.raw_wwn[6],
12544 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12545 *compatible);
12546 #endif /* DEBUG */
12547 failure++;
12548 goto end_of_fcp_create_dip;
12551 cdip = fcp_find_existing_dip(plun, pdip, nname);
12554 * if the old_dip does not match the cdip, that means there is
12555 * some property change. since we'll be using the cdip, we need
12556 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12557 * then the dtype for the device has been updated. Offline the
12558 * the old device and create a new device with the new device type
12559 * Refer to bug: 4764752
12561 if (old_dip && (cdip != old_dip ||
12562 plun->lun_state & FCP_LUN_CHANGED)) {
12563 plun->lun_state &= ~(FCP_LUN_INIT);
12564 mutex_exit(&plun->lun_mutex);
12565 mutex_exit(&pptr->port_mutex);
12567 mutex_enter(&ptgt->tgt_mutex);
12568 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12569 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12570 mutex_exit(&ptgt->tgt_mutex);
12572 #ifdef DEBUG
12573 if (cdip != NULL) {
12574 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12575 fcp_trace, FCP_BUF_LEVEL_2, 0,
12576 "Old dip=%p; New dip=%p don't match", old_dip,
12577 cdip);
12578 } else {
12579 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12580 fcp_trace, FCP_BUF_LEVEL_2, 0,
12581 "Old dip=%p; New dip=NULL don't match", old_dip);
12583 #endif
12585 mutex_enter(&pptr->port_mutex);
12586 mutex_enter(&plun->lun_mutex);
12589 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12590 plun->lun_state &= ~(FCP_LUN_CHANGED);
12591 if (ndi_devi_alloc(pptr->port_dip, nname,
12592 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12593 failure++;
12594 goto end_of_fcp_create_dip;
12599 * Previously all the properties for the devinfo were destroyed here
12600 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12601 * the devid property (and other properties established by the target
12602 * driver or framework) which the code does not always recreate, this
12603 * call was removed.
12604 * This opens a theoretical possibility that we may return with a
12605 * stale devid on the node if the scsi entity behind the fibre channel
12606 * lun has changed.
12609 /* decorate the node with compatible */
12610 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12611 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12612 failure++;
12613 goto end_of_fcp_create_dip;
12616 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12617 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12618 failure++;
12619 goto end_of_fcp_create_dip;
12622 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12623 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12624 failure++;
12625 goto end_of_fcp_create_dip;
12628 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12629 t_pwwn[16] = '\0';
12630 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12631 != DDI_PROP_SUCCESS) {
12632 failure++;
12633 goto end_of_fcp_create_dip;
12637 * If there is no hard address - We might have to deal with
12638 * that by using WWN - Having said that it is important to
12639 * recognize this problem early so ssd can be informed of
12640 * the right interconnect type.
12642 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12643 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12644 } else {
12645 tgt_id = ptgt->tgt_d_id;
12648 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12649 tgt_id) != DDI_PROP_SUCCESS) {
12650 failure++;
12651 goto end_of_fcp_create_dip;
12654 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12655 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12656 failure++;
12657 goto end_of_fcp_create_dip;
12659 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12660 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12661 sam_lun) != DDI_PROP_SUCCESS) {
12662 failure++;
12663 goto end_of_fcp_create_dip;
12666 end_of_fcp_create_dip:
12667 scsi_hba_nodename_compatible_free(nname, compatible);
12669 if (cdip != NULL && failure) {
12670 (void) ndi_prop_remove_all(cdip);
12671 (void) ndi_devi_free(cdip);
12672 cdip = NULL;
12675 return (cdip);
12679 * Function: fcp_create_pip
12681 * Description: Creates a Path Id for the LUN specified by the caller.
12683 * Argument: plun Lun structure
12684 * link_cnt Link state count.
12685 * tgt_cnt Target state count.
12687 * Return Value: NULL if it failed
12688 * mdi_pathinfo_t structure address if it succeeded
12690 * Context: Kernel context
12692 static mdi_pathinfo_t *
12693 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12695 int i;
12696 char buf[MAXNAMELEN];
12697 char uaddr[MAXNAMELEN];
12698 int failure = 0;
12699 uint32_t tgt_id;
12700 uint64_t sam_lun;
12701 struct fcp_tgt *ptgt = plun->lun_tgt;
12702 struct fcp_port *pptr = ptgt->tgt_port;
12703 dev_info_t *pdip = pptr->port_dip;
12704 mdi_pathinfo_t *pip = NULL;
12705 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12706 char *nname = NULL;
12707 char **compatible = NULL;
12708 int ncompatible;
12709 char *scsi_binding_set;
12710 char t_pwwn[17];
12712 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12713 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12715 scsi_binding_set = "vhci";
12717 /* determine the node name and compatible */
12718 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12719 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12721 if (nname == NULL) {
12722 #ifdef DEBUG
12723 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12724 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12725 " compatible: %s",
12726 ddi_driver_name(pdip), ddi_get_instance(pdip),
12727 ptgt->tgt_port_wwn.raw_wwn[0],
12728 ptgt->tgt_port_wwn.raw_wwn[1],
12729 ptgt->tgt_port_wwn.raw_wwn[2],
12730 ptgt->tgt_port_wwn.raw_wwn[3],
12731 ptgt->tgt_port_wwn.raw_wwn[4],
12732 ptgt->tgt_port_wwn.raw_wwn[5],
12733 ptgt->tgt_port_wwn.raw_wwn[6],
12734 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12735 *compatible);
12736 #endif /* DEBUG */
12737 failure++;
12738 goto end_of_fcp_create_pip;
12741 pip = fcp_find_existing_pip(plun, pdip);
12744 * if the old_dip does not match the cdip, that means there is
12745 * some property change. since we'll be using the cdip, we need
12746 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12747 * then the dtype for the device has been updated. Offline the
12748 * the old device and create a new device with the new device type
12749 * Refer to bug: 4764752
12751 if (old_pip && (pip != old_pip ||
12752 plun->lun_state & FCP_LUN_CHANGED)) {
12753 plun->lun_state &= ~(FCP_LUN_INIT);
12754 mutex_exit(&plun->lun_mutex);
12755 mutex_exit(&pptr->port_mutex);
12757 mutex_enter(&ptgt->tgt_mutex);
12758 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12759 FCP_OFFLINE, lcount, tcount,
12760 NDI_DEVI_REMOVE, 0);
12761 mutex_exit(&ptgt->tgt_mutex);
12763 if (pip != NULL) {
12764 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12765 fcp_trace, FCP_BUF_LEVEL_2, 0,
12766 "Old pip=%p; New pip=%p don't match",
12767 old_pip, pip);
12768 } else {
12769 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12770 fcp_trace, FCP_BUF_LEVEL_2, 0,
12771 "Old pip=%p; New pip=NULL don't match",
12772 old_pip);
12775 mutex_enter(&pptr->port_mutex);
12776 mutex_enter(&plun->lun_mutex);
12780 * Since FC_WWN_SIZE is 8 bytes and its not like the
12781 * lun_guid_size which is dependent on the target, I don't
12782 * believe the same trancation happens here UNLESS the standards
12783 * change the FC_WWN_SIZE value to something larger than
12784 * MAXNAMELEN(currently 255 bytes).
12787 for (i = 0; i < FC_WWN_SIZE; i++) {
12788 (void) sprintf(&buf[i << 1], "%02x",
12789 ptgt->tgt_port_wwn.raw_wwn[i]);
12792 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12793 buf, plun->lun_num);
12795 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12797 * Release the locks before calling into
12798 * mdi_pi_alloc_compatible() since this can result in a
12799 * callback into fcp which can result in a deadlock
12800 * (see bug # 4870272).
12802 * Basically, what we are trying to avoid is the scenario where
12803 * one thread does ndi_devi_enter() and tries to grab
12804 * fcp_mutex and another does it the other way round.
12806 * But before we do that, make sure that nobody releases the
12807 * port in the meantime. We can do this by setting a flag.
12809 plun->lun_state &= ~(FCP_LUN_CHANGED);
12810 pptr->port_state |= FCP_STATE_IN_MDI;
12811 mutex_exit(&plun->lun_mutex);
12812 mutex_exit(&pptr->port_mutex);
12813 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12814 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12815 fcp_log(CE_WARN, pptr->port_dip,
12816 "!path alloc failed:0x%x", plun);
12817 mutex_enter(&pptr->port_mutex);
12818 mutex_enter(&plun->lun_mutex);
12819 pptr->port_state &= ~FCP_STATE_IN_MDI;
12820 failure++;
12821 goto end_of_fcp_create_pip;
12823 mutex_enter(&pptr->port_mutex);
12824 mutex_enter(&plun->lun_mutex);
12825 pptr->port_state &= ~FCP_STATE_IN_MDI;
12826 } else {
12827 (void) mdi_prop_remove(pip, NULL);
12830 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12832 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12833 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12834 != DDI_PROP_SUCCESS) {
12835 failure++;
12836 goto end_of_fcp_create_pip;
12839 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12840 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12841 != DDI_PROP_SUCCESS) {
12842 failure++;
12843 goto end_of_fcp_create_pip;
12846 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12847 t_pwwn[16] = '\0';
12848 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12849 != DDI_PROP_SUCCESS) {
12850 failure++;
12851 goto end_of_fcp_create_pip;
12855 * If there is no hard address - We might have to deal with
12856 * that by using WWN - Having said that it is important to
12857 * recognize this problem early so ssd can be informed of
12858 * the right interconnect type.
12860 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12861 ptgt->tgt_hard_addr != 0) {
12862 tgt_id = (uint32_t)
12863 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12864 } else {
12865 tgt_id = ptgt->tgt_d_id;
12868 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12869 != DDI_PROP_SUCCESS) {
12870 failure++;
12871 goto end_of_fcp_create_pip;
12874 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12875 != DDI_PROP_SUCCESS) {
12876 failure++;
12877 goto end_of_fcp_create_pip;
12879 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12880 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12881 != DDI_PROP_SUCCESS) {
12882 failure++;
12883 goto end_of_fcp_create_pip;
12886 end_of_fcp_create_pip:
12887 scsi_hba_nodename_compatible_free(nname, compatible);
12889 if (pip != NULL && failure) {
12890 (void) mdi_prop_remove(pip, NULL);
12891 mutex_exit(&plun->lun_mutex);
12892 mutex_exit(&pptr->port_mutex);
12893 (void) mdi_pi_free(pip, 0);
12894 mutex_enter(&pptr->port_mutex);
12895 mutex_enter(&plun->lun_mutex);
12896 pip = NULL;
12899 return (pip);
/*
 *     Function: fcp_find_existing_dip
 *
 *  Description: Walks the children of pdip looking for a dev_info node
 *		 that already represents the LUN described by plun: same
 *		 node name, same node and port WWN properties, same
 *		 target id and same LUN number.
 *
 *     Argument: plun		Lun structure
 *		 pdip		Parent dev_info node (the port node)
 *		 name		Node name that the child must match
 *
 * Return Value: The matching child dev_info node, or NULL if none.
 *
 *	Context: Kernel context.  Holds the parent busy via
 *		 ndi_devi_enter() for the duration of the scan.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t			nbytes;
	uchar_t			*bytes;
	uint_t			nwords;
	uint32_t		tgt_id;
	int			*words;
	dev_info_t		*cdip;
	dev_info_t		*ndip;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	int			circular;

	/* Lock the parent's child list while we walk it. */
	ndi_devi_enter(pdip, &circular);

	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		/* Grab the sibling now; cdip is examined below. */
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* Compare the node WWN property against our target. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Compare the port WWN property against our target. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Compare the target id property. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* Finally, compare the LUN number property. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/* Everything matched: cdip is the node we want. */
		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}

	ndi_devi_exit(pdip, circular);

	/* cdip is NULL here if the walk completed with no match. */
	return (cdip);
}
13028 static int
13029 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13031 dev_info_t *pdip;
13032 char buf[MAXNAMELEN];
13033 char uaddr[MAXNAMELEN];
13034 int rval = FC_FAILURE;
13036 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13038 pdip = plun->lun_tgt->tgt_port->port_dip;
13041 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13042 * non-NULL even when the LUN is not there as in the case when a LUN is
13043 * configured and then deleted on the device end (for T3/T4 case). In
13044 * such cases, pip will be NULL.
13046 * If the device generates an RSCN, it will end up getting offlined when
13047 * it disappeared and a new LUN will get created when it is rediscovered
13048 * on the device. If we check for lun_cip here, the LUN will not end
13049 * up getting onlined since this function will end up returning a
13050 * FC_SUCCESS.
13052 * The behavior is different on other devices. For instance, on a HDS,
13053 * there was no RSCN generated by the device but the next I/O generated
13054 * a check condition and rediscovery got triggered that way. So, in
13055 * such cases, this path will not be exercised
13057 if (pip == NULL) {
13058 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13059 fcp_trace, FCP_BUF_LEVEL_4, 0,
13060 "fcp_is_pip_present: plun->lun_cip is NULL: "
13061 "plun: %p lun state: %x num: %d target state: %x",
13062 plun, plun->lun_state, plun->lun_num,
13063 plun->lun_tgt->tgt_port->port_state);
13064 return (rval);
13067 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13069 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13071 if (plun->lun_old_guid) {
13072 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13073 rval = FC_SUCCESS;
13075 } else {
13076 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13077 rval = FC_SUCCESS;
13080 return (rval);
13083 static mdi_pathinfo_t *
13084 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13086 char buf[MAXNAMELEN];
13087 char uaddr[MAXNAMELEN];
13088 mdi_pathinfo_t *pip;
13089 struct fcp_tgt *ptgt = plun->lun_tgt;
13090 struct fcp_port *pptr = ptgt->tgt_port;
13092 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13094 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13095 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13097 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13099 return (pip);
/*
 *     Function: fcp_online_child
 *
 *  Description: Brings the child node (dev_info or mdi_pathinfo) for
 *		 plun online.  In the MPxIO case, if MDI reports the
 *		 device is not supported, falls back to creating a
 *		 plain dev_info node and retries via the "again" label.
 *
 *     Argument: plun		Lun structure
 *		 cip		Child node (DIP or PIP depending on
 *				lun_mpxio)
 *		 lcount		Link state count.
 *		 tcount		Target state count.
 *		 flags		Flags passed to ndi/mdi online calls
 *		 circ		pHCI circular-enter cookie from caller
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 *	Context: Kernel context.  Entered and exited with port_mutex
 *		 and lun_mutex held; both are dropped around the
 *		 framework online calls.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int		rval;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO: online the plain dev_info node via NDI. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO: online the mdi_pathinfo node via MDI. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t	*old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry as a legacy (non-MPxIO) node. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI return code onto the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		/* Deliver the insertion event to interested consumers. */
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Re-take the locks the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
/*
 *     Function: fcp_offline_child
 *
 *  Description: Takes the child node (dev_info or mdi_pathinfo) for
 *		 plun offline and, when NDI_DEVI_REMOVE is set, frees
 *		 the node and clears the LUN's reference to it.
 *
 *     Argument: plun		Lun structure
 *		 cip		Child node (DIP or PIP depending on
 *				lun_mpxio)
 *		 lcount		Link state count (unused - ARGSUSED).
 *		 tcount		Target state count (unused - ARGSUSED).
 *		 flags		Flags passed to ndi/mdi offline calls
 *		 circ		pHCI circular-enter cookie from caller
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 *	Context: Kernel context.  Entered and exited with lun_mutex and
 *		 port_mutex held; both are dropped around the framework
 *		 offline calls.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int		rval;
	int		lun_mpxio;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Non-MPxIO: offline the plain dev_info node via NDI. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO: offline the mdi_pathinfo node via MDI. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Map the MDI return code onto the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Re-take the locks the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
/*
 *     Function: fcp_remove_child
 *
 *  Description: Removes the child node (dev_info or mdi_pathinfo) of a
 *		 LUN and clears plun->lun_cip.  For MPxIO children the
 *		 path is offlined with NDI_DEVI_REMOVE and then freed.
 *
 *     Argument: plun		Lun structure
 *
 * Return Value: None
 *
 *	Context: Kernel context.  Entered with lun_mutex held (and, in
 *		 the MPxIO branch, tgt_mutex and port_mutex as well,
 *		 since all three are dropped and re-taken around the
 *		 MDI calls).
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t	*cip;
	int		circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy node: strip properties and free it. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(
			    plun->lun_tgt->tgt_port->port_dip, &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(
			    plun->lun_tgt->tgt_port->port_dip, circ);
			(void) mdi_pi_offline(PIP(cip),
			    NDI_DEVI_REMOVE);
			mdi_devi_enter_phci(
			    plun->lun_tgt->tgt_port->port_dip, &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(
			    plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-take the locks in the original order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone; just drop our stale reference. */
		plun->lun_cip = NULL;
	}
}
/*
 * fcp_watch: watchdog timeout handler.
 *
 * Scheduled during an attach or resume (if not already running); a
 * single timeout serves all ports.  Acquires and releases the global
 * mutex.
 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fcp_ipkt	*nicmd;
	struct fcp_pkt	*cmd;
	struct fcp_pkt	*ncmd;
	struct fcp_pkt	*tail;
	struct fcp_pkt	*pcmd;
	struct fcp_pkt	*save_head;
	struct fcp_port	*save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		/* Remember the head so we can detect list changes later. */
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		/*
		 * Skip a port that is going away (and has no internal
		 * packets left to service).
		 */
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* Unlink the timed-out command from the queue. */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt		*pkt = cmd->cmd_pkt;
				struct fcp_lun		*plun;
				struct fcp_tgt		*ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				/*
				 * ?: used for its side effect only: fail
				 * with reset stats if an abort was already
				 * in progress, timeout stats otherwise.
				 */
				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only upto the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/* Now service the internal (ELS/discovery) packet list. */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					/*
					 * On a successful resend the packet
					 * stays alive; "continue" skips the
					 * finish-init/free path below.
					 */
					switch (icmd->ipkt_opcode) {
					int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			/* Retry failed or not retryable: release the pkt. */
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
/*
 *     Function: fcp_check_reset_delay
 *
 *  Description: Walks the port's reset-delay list, removes due
 *		 elements, restores the target/LUN busy state (if the
 *		 target generation has not changed) and aborts the
 *		 outstanding commands for that target/LUN.
 *
 *     Argument: pptr		Port structure
 *
 * Return Value: None
 *
 *	Context: Kernel context, port_mutex held (dropped around
 *		 fcp_abort_all).
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t		tgt_cnt;
	int			level;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_reset_elem	*cur = NULL;
	struct fcp_reset_elem	*next = NULL;
	struct fcp_reset_elem	*prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): this comparison skips elements whose
		 * timeout is already below fcp_watchdog_time and processes
		 * the others, which is the opposite sense of the analogous
		 * "ipkt_restart >= fcp_watchdog_time => not yet due" check
		 * in fcp_watch().  Possibly a '>' garbled in extraction —
		 * verify against the upstream source before relying on it.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* A target element means target reset; else LUN reset. */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/*
		 * Only act if the target generation is still the one the
		 * element was queued under.
		 */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
/*
 * Function: fcp_abort_all
 *
 * Description: Flushes and aborts all commands destined to the given target
 *		(or, for a LUN-level reset, the given LUN).  Queued commands
 *		are first spliced off the port overflow queue and completed
 *		with CMD_RESET; commands already issued to the FCA are then
 *		aborted via fc_ulp_abort().  Commands whose abort fails are
 *		left on the overflow queue with an invalid timeout so the
 *		fcp_watch thread can reap them later.
 *
 * Argument:	*pptr		FCP port.
 *		*ttgt		Target being reset (may be NULL for LUN reset).
 *		*rlun		LUN being reset (NULL for target-wide reset).
 *		tgt_cnt		Target change count snapshot taken by caller.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun	*tlun, *nlun;
	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * Pass 1: walk the port overflow queue and unlink every command
	 * addressed to the target/LUN under reset onto a private list
	 * (head/tail).  pcmd trails the scan as the previous kept element.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;
		ncmd = cmd->cmd_next;

		if (ptgt != ttgt && plun != rlun) {
			/* Not ours: keep it on the queue. */
			pcmd = cmd;
			continue;
		}

		/* Unlink cmd from the port queue. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append to the private completion list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Complete the flushed commands with CMD_RESET, but only if the
	 * target generation is still the one our caller snapshotted.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */
	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		if (rlun && rlun != tlun) {
			/* LUN-level reset: skip every other LUN. */
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing.  The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * We dropped lun_mutex for the abort, so the
				 * LUN packet list may have changed: restart
				 * the scan from the head.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/*
		 * If any abort was attempted (restart), rescan from the first
		 * LUN of the target; otherwise advance to the next LUN.
		 */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			/* Target generation changed under us: give up. */
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14064 * unlink the soft state, returning the soft state found (if any)
14066 * acquires and releases the global mutex
14068 struct fcp_port *
14069 fcp_soft_state_unlink(struct fcp_port *pptr)
14071 struct fcp_port *hptr; /* ptr index */
14072 struct fcp_port *tptr; /* prev hptr */
14074 mutex_enter(&fcp_global_mutex);
14075 for (hptr = fcp_port_head, tptr = NULL;
14076 hptr != NULL;
14077 tptr = hptr, hptr = hptr->port_next) {
14078 if (hptr == pptr) {
14079 /* we found a match -- remove this item */
14080 if (tptr == NULL) {
14081 /* we're at the head of the list */
14082 fcp_port_head = hptr->port_next;
14083 } else {
14084 tptr->port_next = hptr->port_next;
14086 break; /* success */
14089 if (fcp_port_head == NULL) {
14090 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14092 mutex_exit(&fcp_global_mutex);
14093 return (hptr);
14098 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14099 * WWN and a LUN number
14101 /* ARGSUSED */
14102 static struct fcp_lun *
14103 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14105 int hash;
14106 struct fcp_tgt *ptgt;
14107 struct fcp_lun *plun;
14109 ASSERT(mutex_owned(&pptr->port_mutex));
14111 hash = FCP_HASH(wwn);
14112 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14113 ptgt = ptgt->tgt_next) {
14114 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14115 sizeof (ptgt->tgt_port_wwn)) == 0) {
14116 mutex_enter(&ptgt->tgt_mutex);
14117 for (plun = ptgt->tgt_lun;
14118 plun != NULL;
14119 plun = plun->lun_next) {
14120 if (plun->lun_num == lun) {
14121 mutex_exit(&ptgt->tgt_mutex);
14122 return (plun);
14125 mutex_exit(&ptgt->tgt_mutex);
14126 return (NULL);
14129 return (NULL);
/*
 * Function: fcp_prepare_pkt
 *
 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
 *		for fcp_start().  It binds the data or partially maps it.
 *		Builds the FCP header and starts the initialization of the
 *		Fibre Channel header.
 *
 * Argument:	*pptr		FCP port.
 *		*cmd		FCP packet.
 *		*plun		LUN the command will be sent to.
 *
 * Context:	User, Kernel and Interrupt context.
 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	/* Either a completion routine or polled mode is required. */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/*
		 * The command carries data: derive the transfer direction
		 * from the DMA binding flags and hand the cookie list to
		 * the FCA.
		 */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* No data phase for this command. */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	/*
	 * Copy the FCP_CMD payload out to DVMA space when the FCA uses it;
	 * otherwise the FCA reads the command in place.
	 */
	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* Reset the SCSI packet completion fields for this (re)submission. */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled command: no completion callback. */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* Start the FC frame header: destination and source port IDs. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
/*
 * Complete a SCSI command back to its initiator by invoking the target
 * driver's completion callback through the SCSA framework.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
/*
 * called to do polled I/O by fcp_start()
 *
 * return a transport status value, i.e. TRAN_ACCECPT for success
 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int	rval;

#ifdef	DEBUG
	/* Track the number of outstanding polled packets (debug only). */
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * Use the caller-supplied timeout when present, otherwise fall
	 * back to the default polling timeout.
	 */
	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* Polled packets must not have a completion routine. */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	/* fc_ulp_transport() blocks here until the polled I/O finishes. */
	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Map the FC transport result onto a SCSA TRAN_* status. */
	switch (rval) {
	case FC_SUCCESS:
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}
14314 * called by some of the following transport-called routines to convert
14315 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14317 static struct fcp_port *
14318 fcp_dip2port(dev_info_t *dip)
14320 int instance;
14322 instance = ddi_get_instance(dip);
14323 return (ddi_get_soft_state(fcp_softstate, instance));
14328 * called internally to return a LUN given a dip
14330 struct fcp_lun *
14331 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14333 struct fcp_tgt *ptgt;
14334 struct fcp_lun *plun;
14335 int i;
14338 ASSERT(mutex_owned(&pptr->port_mutex));
14340 for (i = 0; i < FCP_NUM_HASH; i++) {
14341 for (ptgt = pptr->port_tgt_hash_table[i];
14342 ptgt != NULL;
14343 ptgt = ptgt->tgt_next) {
14344 mutex_enter(&ptgt->tgt_mutex);
14345 for (plun = ptgt->tgt_lun; plun != NULL;
14346 plun = plun->lun_next) {
14347 mutex_enter(&plun->lun_mutex);
14348 if (plun->lun_cip == cip) {
14349 mutex_exit(&plun->lun_mutex);
14350 mutex_exit(&ptgt->tgt_mutex);
14351 return (plun); /* match found */
14353 mutex_exit(&plun->lun_mutex);
14355 mutex_exit(&ptgt->tgt_mutex);
14358 return (NULL); /* no LUN found */
14362 * pass an element to the hotplug list, kick the hotplug thread
14363 * and wait for the element to get processed by the hotplug thread.
14364 * on return the element is freed.
14366 * return zero success and non-zero on failure
14368 * acquires/releases the target mutex
14371 static int
14372 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14373 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14375 struct fcp_hp_elem *elem;
14376 int rval;
14378 mutex_enter(&plun->lun_tgt->tgt_mutex);
14379 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14380 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14381 mutex_exit(&plun->lun_tgt->tgt_mutex);
14382 fcp_log(CE_CONT, pptr->port_dip,
14383 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14384 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14385 return (NDI_FAILURE);
14387 mutex_exit(&plun->lun_tgt->tgt_mutex);
14388 mutex_enter(&elem->mutex);
14389 if (elem->wait) {
14390 while (elem->wait) {
14391 cv_wait(&elem->cv, &elem->mutex);
14394 rval = (elem->result);
14395 mutex_exit(&elem->mutex);
14396 mutex_destroy(&elem->mutex);
14397 cv_destroy(&elem->cv);
14398 kmem_free(elem, sizeof (struct fcp_hp_elem));
14399 return (rval);
14403 * pass an element to the hotplug list, and then
14404 * kick the hotplug thread
14406 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14408 * acquires/releases the hotplug mutex
14410 * called with the target mutex owned
14412 * memory acquired in NOSLEEP mode
14413 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14414 * for the hp daemon to process the request and is responsible for
14415 * freeing the element
14417 static struct fcp_hp_elem *
14418 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14419 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14421 struct fcp_hp_elem *elem;
14422 dev_info_t *pdip;
14424 ASSERT(pptr != NULL);
14425 ASSERT(plun != NULL);
14426 ASSERT(plun->lun_tgt != NULL);
14427 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14429 /* create space for a hotplug element */
14430 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14431 == NULL) {
14432 fcp_log(CE_WARN, NULL,
14433 "!can't allocate memory for hotplug element");
14434 return (NULL);
14437 /* fill in hotplug element */
14438 elem->port = pptr;
14439 elem->lun = plun;
14440 elem->cip = cip;
14441 elem->old_lun_mpxio = plun->lun_mpxio;
14442 elem->what = what;
14443 elem->flags = flags;
14444 elem->link_cnt = link_cnt;
14445 elem->tgt_cnt = tgt_cnt;
14446 elem->wait = wait;
14447 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14448 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14450 /* schedule the hotplug task */
14451 pdip = pptr->port_dip;
14452 mutex_enter(&plun->lun_mutex);
14453 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14454 plun->lun_event_count++;
14455 elem->event_cnt = plun->lun_event_count;
14457 mutex_exit(&plun->lun_mutex);
14458 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14459 (void *)elem, KM_NOSLEEP) == (uintptr_t)NULL) {
14460 mutex_enter(&plun->lun_mutex);
14461 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14462 plun->lun_event_count--;
14464 mutex_exit(&plun->lun_mutex);
14465 kmem_free(elem, sizeof (*elem));
14466 return (0);
14469 return (elem);
/*
 * Function: fcp_retransport_cmd
 *
 * Description: Attempts to (re)issue a previously queued command.  If the
 *		LUN is usable and the port is not onlining, the packet is
 *		re-prepared and handed to the transport; on transport failure
 *		(or when the LUN/port is not ready) the command is put back
 *		on the port overflow queue.
 *
 * Argument:	*pptr	FCP port.
 *		*cmd	FCP packet to retransport (must be CFLAG_IN_QUEUE).
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	fc_packet_t		*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;

			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* Zero pkt_time means this command never times out. */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state holds enumerated FCP_PKT_* values
		 * elsewhere in this file, yet here it is cleared with a
		 * bitwise AND — presumably relying on FCP_PKT_ISSUED's bit
		 * pattern to yield FCP_PKT_IDLE; confirm against fcpvar.h.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* Transport failed or LUN not ready: back onto the overflow queue. */
	fcp_queue_pkt(pptr, cmd);
}
14551 static void
14552 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14554 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14556 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14557 cmd->cmd_state = FCP_PKT_IDLE;
14559 cmd->cmd_pkt->pkt_reason = reason;
14560 cmd->cmd_pkt->pkt_state = 0;
14561 cmd->cmd_pkt->pkt_statistics = statistics;
14563 fcp_post_callback(cmd);
14567 * Function: fcp_queue_pkt
14569 * Description: This function queues the packet passed by the caller into
14570 * the list of packets of the FCP port.
14572 * Argument: *pptr FCP port.
14573 * *cmd FCP packet to queue.
14575 * Return Value: None
14577 * Context: User, Kernel and Interrupt context.
14579 static void
14580 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14582 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14584 mutex_enter(&pptr->port_pkt_mutex);
14585 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14586 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14587 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14590 * zero pkt_time means hang around for ever
14592 if (cmd->cmd_pkt->pkt_time) {
14593 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14594 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14595 } else {
14597 * Indicate the watch thread to fail the
14598 * command by setting it to highest value
14600 cmd->cmd_timeout = fcp_watchdog_time;
14601 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14605 if (pptr->port_pkt_head) {
14606 ASSERT(pptr->port_pkt_tail != NULL);
14608 pptr->port_pkt_tail->cmd_next = cmd;
14609 pptr->port_pkt_tail = cmd;
14610 } else {
14611 ASSERT(pptr->port_pkt_tail == NULL);
14613 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14615 cmd->cmd_next = NULL;
14616 mutex_exit(&pptr->port_pkt_mutex);
14620 * Function: fcp_update_targets
14622 * Description: This function applies the specified change of state to all
14623 * the targets listed. The operation applied is 'set'.
14625 * Argument: *pptr FCP port.
14626 * *dev_list Array of fc_portmap_t structures.
14627 * count Length of dev_list.
14628 * state State bits to update.
14629 * cause Reason for the update.
14631 * Return Value: None
14633 * Context: User, Kernel and Interrupt context.
14634 * The mutex pptr->port_mutex must be held.
14636 static void
14637 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14638 uint32_t count, uint32_t state, int cause)
14640 fc_portmap_t *map_entry;
14641 struct fcp_tgt *ptgt;
14643 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14645 while (count--) {
14646 map_entry = &(dev_list[count]);
14647 ptgt = fcp_lookup_target(pptr,
14648 (uchar_t *)&(map_entry->map_pwwn));
14649 if (ptgt == NULL) {
14650 continue;
14653 mutex_enter(&ptgt->tgt_mutex);
14654 ptgt->tgt_trace = 0;
14655 ptgt->tgt_change_cnt++;
14656 ptgt->tgt_statec_cause = cause;
14657 ptgt->tgt_tmp_cnt = 1;
14658 fcp_update_tgt_state(ptgt, FCP_SET, state);
14659 mutex_exit(&ptgt->tgt_mutex);
14663 static int
14664 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14665 int lcount, int tcount, int cause)
14667 int rval;
14669 mutex_enter(&pptr->port_mutex);
14670 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14671 mutex_exit(&pptr->port_mutex);
14673 return (rval);
/*
 * Function: fcp_call_finish_init_held
 *
 * Description: Bookkeeping step of the discovery process.  Decrements the
 *		per-target and per-port temporary counters and, when they
 *		reach zero, calls fcp_finish_tgt()/fcp_finish_init() to wrap
 *		up discovery for the target and/or the port.
 *
 * Argument:	*pptr	FCP port.
 *		*ptgt	Target just processed (may be NULL).
 *		lcount	Link change count snapshot taken by caller.
 *		tcount	Target change count snapshot taken by caller.
 *		cause	Reason for the discovery pass (FCP_CAUSE_*).
 *
 * Return Value: FCP_NO_CHANGE or FCP_DEV_CHANGE (target generation moved).
 *
 * Context:	Caller must hold pptr->port_mutex.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;
	int	finish_tgt = 0;
	int	do_finish_init = 0;
	int	rval = FCP_NO_CHANGE;

	/* Only link-level events participate in port-level completion. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * The target generation moved while we were working:
			 * report the change, and count this target as done
			 * at most once for port-level accounting.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			if (--ptgt->tgt_tmp_cnt <= 0) {
				/* Last outstanding unit for this target. */
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* Record where tgt_tmp_cnt reached zero, for debugging. */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			/* Last outstanding target: discovery is complete. */
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
/*
 * Function: fcp_reconfigure_luns
 *
 * Description: timeout(9F) callback armed when a target reports that its
 *		LUN configuration changed (REPORT_LUNS data changed).  It
 *		fabricates a one-entry device-change portmap for the target
 *		and feeds it back through fcp_statec_callback() so the normal
 *		discovery path re-enumerates the LUNs.
 *
 * Argument:	tgt_handle	Opaque pointer to the struct fcp_tgt.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 *
	 * NOTE(review): tgt_tid is read here without tgt_mutex; presumably
	 * fcp_free_target()'s NULL-then-untimeout ordering makes this safe —
	 * confirm against fcp_free_target().
	 */

	if (ptgt->tgt_tid == NULL) {
		return;
	}

	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Build a single-entry portmap describing this target. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Re-drive discovery for this target through the state-change path. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14812 static void
14813 fcp_free_targets(struct fcp_port *pptr)
14815 int i;
14816 struct fcp_tgt *ptgt;
14818 mutex_enter(&pptr->port_mutex);
14819 for (i = 0; i < FCP_NUM_HASH; i++) {
14820 ptgt = pptr->port_tgt_hash_table[i];
14821 while (ptgt != NULL) {
14822 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14824 fcp_free_target(ptgt);
14825 ptgt = next_tgt;
14828 mutex_exit(&pptr->port_mutex);
/*
 * Function: fcp_free_target
 *
 * Description: Tears down a single target: cancels its pending reconfigure
 *		timeout (if any), deallocates all of its LUNs and finally the
 *		target structure itself.
 *
 * Argument:	*ptgt	Target to free.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		mutex_exit(&ptgt->tgt_mutex);

		/* Drop the mutex: untimeout(9F) may wait for the callback. */
		(void) untimeout(tid);

		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Release every LUN attached to this target. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14869 * Function: fcp_is_retryable
14871 * Description: Indicates if the internal packet is retryable.
14873 * Argument: *icmd FCP internal packet.
14875 * Return Value: 0 Not retryable
14876 * 1 Retryable
14878 * Context: User, Kernel and Interrupt context
14880 static int
14881 fcp_is_retryable(struct fcp_ipkt *icmd)
14883 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14884 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14885 return (0);
14888 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14889 icmd->ipkt_port->port_deadline) ? 1 : 0);
/*
 * Function: fcp_create_on_demand
 *
 * Description: Creates (configures) a target and its LUNs at user request
 *		(ioctl path, e.g. luxadm/cfgadm).  Looks up the remote port
 *		in fp/fctl, drives discovery for it, polls until the target
 *		is created (or the configuration generation moves), and then
 *		onlines every discovered LUN.
 *
 * Argument:	*pptr		FCP port.
 *		*pwwn		Port WWN.
 *
 * Return Value: 0	Success
 *		EIO
 *		ENOMEM
 *		EBUSY
 *		EINVAL
 *
 * Context:	User and Kernel context
 */
static int
fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
{
	int			wait_ms;
	int			tcount;
	int			lcount;
	int			ret;
	int			error;
	int			rval = EIO;
	int			ntries;
	fc_portmap_t		*devlist;
	opaque_t		pd;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	int			old_manual = 0;

	/* Allocates the fc_portmap_t structure. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);

	/*
	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
	 * in the commented statement below:
	 *
	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
	 *
	 * Below, the deadline for the discovery process is set.
	 */
	mutex_enter(&pptr->port_mutex);
	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
	mutex_exit(&pptr->port_mutex);

	/*
	 * We try to find the remote port based on the WWN provided by the
	 * caller.  We actually ask fp/fctl if it has it.
	 */
	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, &error, 1);

	if (pd == NULL) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/*
	 * The remote port was found.  We ask fp/fctl to update our
	 * fc_portmap_t structure.
	 */
	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, devlist);
	if (ret != FC_SUCCESS) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/*
	 * The map flag field is set to indicates that the creation is being
	 * done at the user request (Ioclt probably luxadm or cfgadm).
	 */
	devlist->map_type = PORT_DEVICE_USER_CREATE;

	mutex_enter(&pptr->port_mutex);

	/*
	 * We check to see if fcp already has a target that describes the
	 * device being created.  If not it is created.
	 */
	ptgt = fcp_lookup_target(pptr, pwwn);
	if (ptgt == NULL) {
		lcount = pptr->port_link_cnt;
		mutex_exit(&pptr->port_mutex);

		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ENOMEM);
		}

		mutex_enter(&pptr->port_mutex);
	}

	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
	ptgt->tgt_tmp_cnt = 1;
	ptgt->tgt_device_created = 0;
	/*
	 * If fabric and auto config is set but the target was
	 * manually unconfigured then reset to the manual_config_only to
	 * 0 so the device will get configured.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration &&
	    ptgt->tgt_manual_config_only == 1) {
		old_manual = 1;
		ptgt->tgt_manual_config_only = 0;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/* Mark the target busy and bump its generation before discovery. */
	fcp_update_targets(pptr, devlist, 1,
	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);

	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
		/* Discovery kickoff failed: restore manual-only if needed. */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_manual_config_only = 1;
			mutex_exit(&ptgt->tgt_mutex);
		}

		if (pptr->port_link_cnt != lcount ||
		    ptgt->tgt_change_cnt != tcount) {
			rval = EBUSY;
		}
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand: mapflags ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
		    ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval);
		return (rval);
	}

	/*
	 * Due to lack of synchronization mechanisms, we perform
	 * periodic monitoring of our request; Because requests
	 * get dropped when another one supercedes (either because
	 * of a link change or a target change), it is difficult to
	 * provide a clean synchronization mechanism (such as a
	 * semaphore or a conditional variable) without exhaustively
	 * rewriting the mainline discovery code of this driver.
	 */
	wait_ms = 500;

	ntries = fcp_max_target_retries;

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	/*
	 * Poll until the target is created, the generation moves, or the
	 * retry budget is exhausted.  Both mutexes are dropped around the
	 * sleep so discovery can make progress.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ddi_msleep(wait_ms);

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
	}

	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
		rval = EBUSY;
	} else {
		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
		    FCP_TGT_NODE_PRESENT) {
			rval = 0;
		}
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	if (rval) {
		/* Target did not come up: restore manual-only and bail. */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		kmem_free(devlist, sizeof (*devlist));

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
		    "tgt_device_created=%x, tgt D_ID=%x",
		    ntries, ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval,
		    ptgt->tgt_device_created, ptgt->tgt_d_id);
		return (rval);
	}

	/* Snapshot the generation counts used for the online requests. */
	if ((plun = ptgt->tgt_lun) != NULL) {
		tcount = plun->lun_tgt->tgt_change_cnt;
	} else {
		rval = EINVAL;
	}
	lcount = pptr->port_link_cnt;

	/*
	 * Configuring the target with no LUNs will fail.  We
	 * should reset the node state so that it is not
	 * automatically configured when the LUNs are added
	 * to this target.
	 */
	if (ptgt->tgt_lun_cnt == 0) {
		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
	}
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Online every LUN that discovery left in a non-offline state. */
	while (plun) {
		child_info_t	*cip;

		mutex_enter(&plun->lun_mutex);
		cip = plun->lun_cip;
		mutex_exit(&plun->lun_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
			mutex_exit(&ptgt->tgt_mutex);

			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
			    FCP_ONLINE, lcount, tcount,
			    NDI_ONLINE_ATTACH);
			if (rval != NDI_SUCCESS) {
				FCP_TRACE(fcp_logq,
				    pptr->port_instbuf, fcp_trace,
				    FCP_BUF_LEVEL_3, 0,
				    "fcp_create_on_demand: "
				    "pass_to_hp_and_wait failed "
				    "rval=%x", rval);
				rval = EIO;
			} else {
				mutex_enter(&LUN_TGT->tgt_mutex);
				plun->lun_state &= ~(FCP_LUN_OFFLINE |
				    FCP_LUN_BUSY);
				mutex_exit(&LUN_TGT->tgt_mutex);
			}
			mutex_enter(&ptgt->tgt_mutex);
		}

		plun = plun->lun_next;
		mutex_exit(&ptgt->tgt_mutex);
	}

	kmem_free(devlist, sizeof (*devlist));

	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration && old_manual) {
		mutex_enter(&ptgt->tgt_mutex);
		/* if successful then set manual to 0 */
		if (rval == 0) {
			ptgt->tgt_manual_config_only = 0;
		} else {
			/* reset to 1 so the user has to do the config */
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
15184 static void
15185 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15187 int count;
15188 uchar_t byte;
15190 count = 0;
15191 while (*string) {
15192 byte = FCP_ATOB(*string); string++;
15193 byte = byte << 4 | FCP_ATOB(*string); string++;
15194 bytes[count++] = byte;
15196 if (count >= byte_len) {
15197 break;
15202 static void
15203 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15205 int i;
15207 for (i = 0; i < FC_WWN_SIZE; i++) {
15208 (void) sprintf(string + (i * 2),
15209 "%02x", wwn[i]);
/*
 * fcp_print_error
 *	Log a diagnostic message for a failed internal packet (SCSI command
 *	or ELS).  A format template is built in a scratch buffer and then
 *	passed to fcp_log() together with the values to expand.
 *
 *	Note the doubled "%%x" in most sprintf templates below: the first
 *	expansion leaves a literal "%x" behind for fcp_log() to consume.
 *	Allocation is KM_NOSLEEP because this may run in interrupt context;
 *	on allocation failure the message is silently dropped.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;	/* set for SCSI opcodes, not ELS */

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/* Seed the message with the opcode-specific prefix. */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * Transport succeeded but the SCSI command itself failed:
		 * decode the FCP response and any sense data.
		 */
		struct fcp_rsp		response, *rsp;
		uchar_t			asc, ascq;
		caddr_t			sense_key = NULL;
		struct fcp_rsp_info	fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy out of the DMA buffer into locals. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			/* Single '%' here: expanded now, not by fcp_log(). */
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Map the sense key to its printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: ask the FC layer to decode it. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			/* ELS messages carry no LUN number. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
/*
 * fcp_handle_ipkt_errors
 *	Decide what to do with an internal packet that failed with
 *	transport error 'rval'.  'op' is a printable name of the operation,
 *	used only in the warning message for unrecognized errors.
 *
 *	Returns DDI_SUCCESS if the packet was requeued (or resent as a
 *	PLOGI) and the caller should not tear it down; DDI_FAILURE
 *	otherwise.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		/* Transient conditions: requeue while retries remain. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Unexpected error: warn unless a link/target state change
		 * has already superseded this packet (port mutex taken
		 * before target mutex, per the driver's lock order).
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15476 * Check of outstanding commands on any LUN for this target
15478 static int
15479 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15481 struct fcp_lun *plun;
15482 struct fcp_pkt *cmd;
15484 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15485 mutex_enter(&plun->lun_mutex);
15486 for (cmd = plun->lun_pkt_head; cmd != NULL;
15487 cmd = cmd->cmd_forw) {
15488 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15489 mutex_exit(&plun->lun_mutex);
15490 return (FC_SUCCESS);
15493 mutex_exit(&plun->lun_mutex);
15496 return (FC_FAILURE);
/*
 * fcp_construct_map
 *	Build an fc_portmap_t array describing every non-orphan target on
 *	the port.  The hash table is walked twice under port_mutex: once to
 *	count the entries, once to fill them in.  For each target the FC
 *	transport is asked to fill the map entry from its port WWN; if that
 *	fails, a fallback entry is synthesized from the cached target state
 *	and marked PORT_DEVICE_INVALID / PORT_DEVICE_OLD.
 *
 *	On success *dev_cnt holds the entry count and the allocated array is
 *	returned; the caller owns the memory (allocated with KM_NOSLEEP —
 *	NULL is returned, with a warning logged, if allocation fails).
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count non-orphan targets. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one map entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * Transport has no record of this WWN:
				 * synthesize the entry from cached state.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
/*
 * Intimate MPxIO that the lun is busy and cannot accept regular IO.
 *
 * Walks every LUN of every target on the port (under each target's
 * tgt_mutex) and, for MPxIO-managed LUNs marked FCP_LUN_BUSY, posts an
 * FCP_MPXIO_PATH_SET_BUSY request to the hotplug thread.  A failure to
 * queue the request is only traced, not propagated.
 */
static void
fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
{
	int		i;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;

	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_mpxio &&
				    plun->lun_state & FCP_LUN_BUSY) {
					if (!fcp_pass_to_hp(pptr, plun,
					    plun->lun_cip,
					    FCP_MPXIO_PATH_SET_BUSY,
					    pptr->port_link_cnt,
					    ptgt->tgt_change_cnt, 0, 0)) {
						FCP_TRACE(fcp_logq,
						    pptr->port_instbuf,
						    fcp_trace,
						    FCP_BUF_LEVEL_2, 0,
						    "path_verifybusy: "
						    "disable lun %p failed!",
						    plun);
					}
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}
15604 static int
15605 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15607 dev_info_t *cdip = NULL;
15608 dev_info_t *pdip = NULL;
15610 ASSERT(plun);
15612 mutex_enter(&plun->lun_mutex);
15613 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15614 mutex_exit(&plun->lun_mutex);
15615 return (NDI_FAILURE);
15617 mutex_exit(&plun->lun_mutex);
15618 cdip = mdi_pi_get_client(PIP(cip));
15619 pdip = mdi_pi_get_phci(PIP(cip));
15621 ASSERT(cdip != NULL);
15622 ASSERT(pdip != NULL);
15624 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15625 /* LUN ready for IO */
15626 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15627 } else {
15628 /* LUN busy to accept IO */
15629 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15631 return (NDI_SUCCESS);
/*
 * fcp_get_lun_path
 *	Return the devinfo path of the LUN's device node, or NULL if the
 *	LUN is NULL, offline (instance number -1), or has no node.
 *
 *	Caller must free the returned string; it is MAXPATHLEN bytes long.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t	*dip = NULL;
	char		*path = NULL;
	mdi_pathinfo_t	*pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO: lun_cip is the dev_info node itself. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/*
		 * lun_cip must be accessed with lun_mutex held. Here
		 * plun->lun_cip either points to a valid node or it is NULL.
		 * Make a copy so that we can release lun_mutex.
		 */
		pip = PIP(plun->lun_cip);

		/*
		 * Increase ref count on the path so that we can release
		 * lun_mutex and still be sure that the pathinfo node (and thus
		 * also the client) is not deallocated. If pip is NULL, this
		 * has no effect.
		 */
		mdi_hold_path(pip);

		mutex_exit(&plun->lun_mutex);

		/* Get the client. If pip is NULL, we get NULL. */
		dip = mdi_pi_get_client(pip);
	}

	if (dip == NULL)
		goto out;
	if (ddi_get_instance(dip) < 0)
		goto out;

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)
		goto out;

	(void) ddi_pathname(dip, path);

	/* Clean up: drop the hold taken above (no-op when pip is NULL). */
out:
	if (pip != NULL)
		mdi_rele_path(pip);

	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}
/*
 * fcp_scsi_bus_config
 *	SCSA bus_config(9E) entry point.  Limits any wait to the time left
 *	in the FCP_INIT_WAIT_TIMEOUT window measured from port attach.
 *
 *	BUS_CONFIG_ONE is retried up to fcp_max_bus_config_retries because
 *	fabric devices needed for root may not have been discovered yet;
 *	BUS_CONFIG_DRIVER / BUS_CONFIG_ALL first wait (on port_config_cv)
 *	for all devices to report in.  Both paths drain the parent's taskq
 *	so that queued node creation has finished before the final attempt.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/* Remaining discovery window; clamped at zero once expired. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute how much of the window remains. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15773 static int
15774 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15775 ddi_bus_config_op_t op, void *arg)
15777 if (fcp_bus_config_debug) {
15778 flag |= NDI_DEVI_DEBUG;
15781 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15786 * Routine to copy GUID into the lun structure.
15787 * returns 0 if copy was successful and 1 if encountered a
15788 * failure and did not copy the guid.
15790 static int
15791 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15794 int retval = 0;
15796 /* add one for the null terminator */
15797 const unsigned int len = strlen(guidp) + 1;
15799 if ((guidp == NULL) || (plun == NULL)) {
15800 return (1);
15804 * if the plun->lun_guid already has been allocated,
15805 * then check the size. if the size is exact, reuse
15806 * it....if not free it an allocate the required size.
15807 * The reallocation should NOT typically happen
15808 * unless the GUIDs reported changes between passes.
15809 * We free up and alloc again even if the
15810 * size was more than required. This is due to the
15811 * fact that the field lun_guid_size - serves
15812 * dual role of indicating the size of the wwn
15813 * size and ALSO the allocation size.
15815 if (plun->lun_guid) {
15816 if (plun->lun_guid_size != len) {
15818 * free the allocated memory and
15819 * initialize the field
15820 * lun_guid_size to 0.
15822 kmem_free(plun->lun_guid, plun->lun_guid_size);
15823 plun->lun_guid = NULL;
15824 plun->lun_guid_size = 0;
15828 * alloc only if not already done.
15830 if (plun->lun_guid == NULL) {
15831 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15832 if (plun->lun_guid == NULL) {
15833 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15834 "Unable to allocate"
15835 "Memory for GUID!!! size %d", len);
15836 retval = 1;
15837 } else {
15838 plun->lun_guid_size = len;
15841 if (plun->lun_guid) {
15843 * now copy the GUID
15845 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15847 return (retval);
/*
 * fcp_reconfig_wait
 *
 * Wait for a rediscovery/reconfiguration to complete before continuing.
 * Blocks on port_config_cv for at most FCP_INIT_WAIT_TIMEOUT, or until
 * port_tmp_cnt drops to 0.  Returns void: a timeout is not treated as an
 * error (see the comment at the end).
 */
static void
fcp_reconfig_wait(struct fcp_port *pptr)
{
	clock_t		reconfig_start, wait_timeout;

	/*
	 * Quick check. If pptr->port_tmp_cnt is 0, there is no
	 * reconfiguration in progress.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_tmp_cnt == 0) {
		mutex_exit(&pptr->port_mutex);
		return;
	}
	mutex_exit(&pptr->port_mutex);

	/*
	 * If we cause a reconfig by raising power, delay until all devices
	 * report in (port_tmp_cnt returns to 0)
	 */
	reconfig_start = ddi_get_lbolt();
	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);

	mutex_enter(&pptr->port_mutex);

	/* cv_timedwait may wake spuriously; both conditions are rechecked. */
	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
	    pptr->port_tmp_cnt) {

		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
		    reconfig_start + wait_timeout);
	}

	mutex_exit(&pptr->port_mutex);

	/*
	 * Even if fcp_tmp_count isn't 0, continue without error. The port
	 * we want may still be ok. If not, it will error out later
	 */
}
/*
 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
 * We rely on the fcp_global_mutex to provide protection against changes to
 * the fcp_lun_blacklist.
 *
 * You can describe a list of target port WWNs and LUN numbers which will
 * not be configured. LUN numbers will be interpreted as decimal. White
 * spaces and ',' can be used in the list of LUN numbers.
 *
 * To prevent LUNs 1 and 2 from being configured for target
 * port 510000f010fd92a1 and target port 510000e012079df1, set:
 *
 * pwwn-lun-blacklist=
 * "510000f010fd92a1,1,2",
 * "510000e012079df1,1,2";
 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
	char **prop_array	= NULL;
	char *curr_pwwn		= NULL;
	char *curr_lun		= NULL;
	uint32_t prop_item	= 0;
	int idx			= 0;
	int len			= 0;

	ASSERT(mutex_owned(&fcp_global_mutex));
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		return;
	}

	for (idx = 0; idx < prop_item; idx++) {

		curr_pwwn = prop_array[idx];
		/* Skip leading spaces before the WWN. */
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Must have 16 hex digits, a separator, and >=1 LUN char. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* All 16 WWN characters must be hex digits. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			continue;
		}

		/*
		 * Split the property string in place: terminate the WWN
		 * portion and point curr_lun at the LUN list that follows.
		 */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
/*
 * Get the masking info about one remote target port designated by wwn.
 * Lun ids could be separated by ',' or white spaces.
 *
 * curr_lun is tokenized with an offset/idx scanner: offset marks the
 * start of the current token, idx its length.  Each well-formed decimal
 * token is added to the blacklist; malformed tokens are logged and
 * skipped (or, for over-long tokens, abort the whole scan).
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int		idx = 0;
	uint32_t	offset = 0;
	unsigned long	lun_id = 0;
	char		lunid_buf[16];
	char		*pend = NULL;
	int		illegal_digit = 0;

	while (offset < strlen(curr_lun)) {
		/* Advance idx to the end of the current token. */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			offset += (idx+1);	/* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token longer than lunid_buf can hold: give up on list. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* NUL-terminate a copy of the token and parse it. */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1);	/* To the start of next lun */
		idx = 0;
	}
}
16027 * Add one masking record
16029 static void
16030 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16031 struct fcp_black_list_entry **pplun_blacklist)
16033 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16034 struct fcp_black_list_entry *new_entry = NULL;
16035 la_wwn_t wwn;
16037 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16038 while (tmp_entry) {
16039 if ((bcmp(&tmp_entry->wwn, &wwn,
16040 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16041 return;
16044 tmp_entry = tmp_entry->next;
16047 /* add to black list */
16048 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16049 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16050 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16051 new_entry->lun = lun_id;
16052 new_entry->masked = 0;
16053 new_entry->next = *pplun_blacklist;
16054 *pplun_blacklist = new_entry;
16058 * Check if we should mask the specified lun of this fcp_tgt
16060 static int
16061 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16063 struct fcp_black_list_entry *remote_port;
16065 remote_port = fcp_lun_blacklist;
16066 while (remote_port != NULL) {
16067 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16068 if (remote_port->lun == lun_id) {
16069 remote_port->masked++;
16070 if (remote_port->masked == 1) {
16071 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16072 "%02x%02x%02x%02x%02x%02x%02x%02x "
16073 "is masked due to black listing.\n",
16074 lun_id, wwn->raw_wwn[0],
16075 wwn->raw_wwn[1], wwn->raw_wwn[2],
16076 wwn->raw_wwn[3], wwn->raw_wwn[4],
16077 wwn->raw_wwn[5], wwn->raw_wwn[6],
16078 wwn->raw_wwn[7]);
16080 return (TRUE);
16083 remote_port = remote_port->next;
16085 return (FALSE);
16089 * Release all allocated resources
16091 static void
16092 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16094 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16095 struct fcp_black_list_entry *current_entry = NULL;
16097 ASSERT(mutex_owned(&fcp_global_mutex));
16099 * Traverse all luns
16101 while (tmp_entry) {
16102 current_entry = tmp_entry;
16103 tmp_entry = tmp_entry->next;
16104 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16106 *pplun_blacklist = NULL;
/*
 * In fcp module,
 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
 *
 * tran_init_pkt(9E) entry point for pseudo FC HBAs: allocate (or reuse)
 * a scsi_pkt with the fcp_pkt and FCA private areas appended, wire the
 * embedded fc_packet to it, and map the buf for data transfer.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_pkt_t	*cmd  = NULL;
	fc_frame_hdr_t	*hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */

		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd		= PKT2CMD(pkt);
		cmd->cmd_pkt	= pkt;
		cmd->cmd_fp_pkt	= &cmd->cmd_fc_packet;

		/*
		 * fc_packet_t: FCA private area sits immediately after the
		 * fcp_pkt in the allocation made above.
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.  For now only the stale
		 * port-device handle is cleared on reuse.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buf into kernel virtual space for the FCA. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
/*
 * tran_destroy_pkt(9E) entry point for pseudo FC HBAs: undo what
 * fcp_pseudo_init_pkt() set up.
 */
static void
fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);

	/*
	 * First we let FCA uninitialize its private part.
	 */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    PKT2CMD(pkt)->cmd_fp_pkt);

	/*
	 * Then we uninitialize fc_packet (nothing to do: it is embedded
	 * in the fcp_pkt and holds no separately-owned resources here).
	 */

	/*
	 * Thirdly, we uninitialize fcp_pkt (likewise nothing to do).
	 */

	/*
	 * In the end, we free scsi_pkt (which frees the whole allocation,
	 * fcp_pkt and FCA private area included).
	 */
	scsi_hba_pkt_free(ap, pkt);
}
/*
 * tran_start(9E) entry point for pseudo FC HBAs: finish initializing the
 * fcp_cmd / fc_packet / scsi_pkt triplet and hand the packet to the FC
 * transport.  Returns TRAN_ACCEPT, TRAN_BUSY, or TRAN_FATAL_ERROR (or
 * the result of polled I/O when FLAG_NOINTR is set).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_lun_t	*plun = ADDR2LUN(ap);
	fcp_tgt_t	*ptgt = plun->lun_tgt;
	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	int		rval;

	/* Bind the packet to the target's port device handle. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;

	/* Map SCSA queueing flags onto the FCP task attribute. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 * (pkt_time + 2 gives the transport a small grace period.)
	 */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled I/O: no completion callback. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
/*
 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
 *
 * No synchronization work is performed here; the entry point only
 * emits a trace record.
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
16346 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16348 static void
16349 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16351 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16352 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);