4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
28 * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module
29 * This is a pseudo driver module to handle encapsulation of IP and ARP
30 * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS
31 * driver module which interfaces with IP/ARP using DLPI. This module
32 * is a Style-2 DLS provider.
34 * The implementation of this module is based on RFC 2625 which gives
35 * details on the encapsulation of IP/ARP data over FibreChannel.
36 * The fcip module needs to resolve an IP address to a port address before
37 * sending data to a destination port. A FC device port has 2 addresses
38 * associated with it: An 8 byte World Wide unique Port Name and a 3 byte
39 * volatile Port number or Port_ID.
41 * The mapping between an IP address and the World Wide Port Name is handled
42 * by the ARP layer since the IP over FC draft requires the MAC address to
43 * be the least significant six bytes of the WorldWide Port Names. The
44 * fcip module however needs to identify the destination port uniquely when
45 * the destination FC device has multiple FC ports.
47 * The FC layer mapping between the World Wide Port Name and the Port_ID
48 * will be handled through the use of a fabric name server or through the
49 * use of the FARP ELS command as described in the draft. Since the Port_IDs
50 * are volatile, the mapping between the World Wide Port Name and Port_IDs
51 * must be maintained and validated before use each time a datagram
52 * needs to be sent to the destination ports. The FC transport module
53 * informs the fcip module of all changes to states of ports on the
54 * fabric through registered callbacks. This enables the fcip module
55 * to maintain the WW_PN to Port_ID mappings current.
57 * For details on how this module interfaces with the FibreChannel Transport
58 * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport
59 * Programming guide details the APIs between ULPs and the Transport.
61 * Now for some Caveats:
63 * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have
64 * the NAA bits set to '0001' indicating an IEEE 48-bit address which
65 * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel
66 * adapters containing 2 or more ports, IEEE naming cannot identify the
67 * ports on an adapter uniquely so we will in the first implementation
68 * be operating only on Port 0 of each adapter.
71 #include <sys/types.h>
72 #include <sys/errno.h>
73 #include <sys/debug.h>
75 #include <sys/sysmacros.h>
76 #include <sys/systm.h>
78 #include <sys/stropts.h>
79 #include <sys/stream.h>
80 #include <sys/strlog.h>
81 #include <sys/strsubr.h>
82 #include <sys/cmn_err.h>
87 #include <sys/sunddi.h>
88 #include <sys/ksynch.h>
90 #include <sys/kstat.h>
91 #include <sys/vtrace.h>
92 #include <sys/strsun.h>
93 #include <sys/varargs.h>
94 #include <sys/modctl.h>
95 #include <sys/thread.h>
98 #include <inet/common.h>
99 #include <netinet/ip6.h>
101 #include <inet/arp.h>
104 #include <sys/dlpi.h>
105 #include <sys/ethernet.h>
106 #include <sys/file.h>
107 #include <sys/syslog.h>
108 #include <sys/disp.h>
109 #include <sys/taskq.h>
115 #include <sys/fibre-channel/fc.h>
116 #include <sys/fibre-channel/impl/fc_ulpif.h>
117 #include <sys/fibre-channel/ulp/fcip.h>
119 #define FCIP_ESBALLOC
122 * Function prototypes
125 /* standard loadable modules entry points */
126 static int fcip_attach(dev_info_t
*, ddi_attach_cmd_t
);
127 static int fcip_detach(dev_info_t
*, ddi_detach_cmd_t
);
128 static void fcip_dodetach(struct fcipstr
*slp
);
129 static int fcip_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
,
130 void *arg
, void **result
);
133 /* streams specific */
134 static void fcip_setipq(struct fcip
*fptr
);
135 static int fcip_wput(queue_t
*, mblk_t
*);
136 static int fcip_wsrv(queue_t
*);
137 static void fcip_proto(queue_t
*, mblk_t
*);
138 static void fcip_ioctl(queue_t
*, mblk_t
*);
139 static int fcip_open(queue_t
*wq
, dev_t
*devp
, int flag
,
140 int sflag
, cred_t
*credp
);
141 static int fcip_close(queue_t
*rq
, int flag
, cred_t
*credp
);
142 static int fcip_start(queue_t
*wq
, mblk_t
*mp
, struct fcip
*fptr
,
143 struct fcip_dest
*fdestp
, int flags
);
144 static void fcip_sendup(struct fcip
*fptr
, mblk_t
*mp
,
145 struct fcipstr
*(*acceptfunc
)());
146 static struct fcipstr
*fcip_accept(struct fcipstr
*slp
, struct fcip
*fptr
,
147 int type
, la_wwn_t
*dhostp
);
148 static mblk_t
*fcip_addudind(struct fcip
*fptr
, mblk_t
*mp
,
149 fcph_network_hdr_t
*nhdr
, int type
);
150 static int fcip_setup_mac_addr(struct fcip
*fptr
);
151 static void fcip_kstat_init(struct fcip
*fptr
);
152 static int fcip_stat_update(kstat_t
*, int);
156 static void fcip_spareq(queue_t
*wq
, mblk_t
*mp
);
157 static void fcip_pareq(queue_t
*wq
, mblk_t
*mp
);
158 static void fcip_ubreq(queue_t
*wq
, mblk_t
*mp
);
159 static void fcip_breq(queue_t
*wq
, mblk_t
*mp
);
160 static void fcip_dreq(queue_t
*wq
, mblk_t
*mp
);
161 static void fcip_areq(queue_t
*wq
, mblk_t
*mp
);
162 static void fcip_udreq(queue_t
*wq
, mblk_t
*mp
);
163 static void fcip_ireq(queue_t
*wq
, mblk_t
*mp
);
164 static void fcip_dl_ioc_hdr_info(queue_t
*wq
, mblk_t
*mp
);
167 /* solaris sundry, DR/CPR etc */
168 static int fcip_cache_constructor(void *buf
, void *arg
, int size
);
169 static void fcip_cache_destructor(void *buf
, void *size
);
170 static int fcip_handle_suspend(fcip_port_info_t
*fport
, fc_detach_cmd_t cmd
);
171 static int fcip_handle_resume(fcip_port_info_t
*fport
,
172 fc_ulp_port_info_t
*port_info
, fc_attach_cmd_t cmd
);
173 static fcip_port_info_t
*fcip_softstate_free(fcip_port_info_t
*fport
);
174 static int fcip_port_attach_handler(struct fcip
*fptr
);
178 * ulp - transport interface function prototypes
180 static int fcip_port_attach(opaque_t ulp_handle
, fc_ulp_port_info_t
*,
181 fc_attach_cmd_t cmd
, uint32_t sid
);
182 static int fcip_port_detach(opaque_t ulp_handle
, fc_ulp_port_info_t
*,
183 fc_detach_cmd_t cmd
);
184 static int fcip_port_ioctl(opaque_t ulp_handle
, opaque_t port_handle
,
185 dev_t dev
, int cmd
, intptr_t data
, int mode
, cred_t
*credp
, int *rval
,
187 static void fcip_statec_cb(opaque_t ulp_handle
, opaque_t phandle
,
188 uint32_t port_state
, uint32_t port_top
, fc_portmap_t changelist
[],
189 uint32_t listlen
, uint32_t sid
);
190 static int fcip_els_cb(opaque_t ulp_handle
, opaque_t phandle
,
191 fc_unsol_buf_t
*buf
, uint32_t claimed
);
192 static int fcip_data_cb(opaque_t ulp_handle
, opaque_t phandle
,
193 fc_unsol_buf_t
*payload
, uint32_t claimed
);
196 /* Routing table specific */
197 static void fcip_handle_topology(struct fcip
*fptr
);
198 static int fcip_init_port(struct fcip
*fptr
);
199 struct fcip_routing_table
*fcip_lookup_rtable(struct fcip
*fptr
,
200 la_wwn_t
*pwwn
, int matchflag
);
201 static void fcip_rt_update(struct fcip
*fptr
, fc_portmap_t
*devlist
,
203 static void fcip_rt_flush(struct fcip
*fptr
);
204 static void fcip_rte_remove_deferred(void *arg
);
205 static int fcip_do_plogi(struct fcip
*fptr
, struct fcip_routing_table
*frp
);
208 /* dest table specific */
209 static struct fcip_dest
*fcip_get_dest(struct fcip
*fptr
,
211 static struct fcip_dest
*fcip_add_dest(struct fcip
*fptr
,
212 struct fcip_routing_table
*frp
);
213 static int fcip_dest_add_broadcast_entry(struct fcip
*fptr
, int new_flag
);
214 static uint32_t fcip_get_broadcast_did(struct fcip
*fptr
);
215 static void fcip_cleanup_dest(struct fcip
*fptr
);
218 /* helper functions */
219 static fcip_port_info_t
*fcip_get_port(opaque_t phandle
);
220 static int fcip_wwn_compare(la_wwn_t
*wwn1
, la_wwn_t
*wwn2
, int flag
);
221 static void fcip_ether_to_str(struct ether_addr
*e
, caddr_t s
);
222 static int fcip_port_get_num_pkts(struct fcip
*fptr
);
223 static int fcip_check_port_busy(struct fcip
*fptr
);
224 static void fcip_check_remove_minor_node(void);
225 static int fcip_set_wwn(la_wwn_t
*pwwn
);
226 static int fcip_plogi_in_progress(struct fcip
*fptr
);
227 static int fcip_check_port_exists(struct fcip
*fptr
);
228 static int fcip_is_supported_fc_topology(int fc_topology
);
232 static fcip_pkt_t
*fcip_pkt_alloc(struct fcip
*fptr
, mblk_t
*bp
,
233 int flags
, int datalen
);
234 static void fcip_pkt_free(struct fcip_pkt
*fcip_pkt
, int flags
);
235 static fcip_pkt_t
*fcip_ipkt_alloc(struct fcip
*fptr
, int cmdlen
,
236 int resplen
, opaque_t pd
, int flags
);
237 static void fcip_ipkt_free(fcip_pkt_t
*fcip_pkt
);
238 static void fcip_ipkt_callback(fc_packet_t
*fc_pkt
);
239 static void fcip_free_pkt_dma(fcip_pkt_t
*fcip_pkt
);
240 static void fcip_pkt_callback(fc_packet_t
*fc_pkt
);
241 static void fcip_init_unicast_pkt(fcip_pkt_t
*fcip_pkt
, fc_portid_t sid
,
242 fc_portid_t did
, void (*comp
) ());
243 static int fcip_transport(fcip_pkt_t
*fcip_pkt
);
244 static void fcip_pkt_timeout(void *arg
);
245 static void fcip_timeout(void *arg
);
246 static void fcip_fdestp_enqueue_pkt(struct fcip_dest
*fdestp
,
247 fcip_pkt_t
*fcip_pkt
);
248 static int fcip_fdestp_dequeue_pkt(struct fcip_dest
*fdestp
,
249 fcip_pkt_t
*fcip_pkt
);
250 static int fcip_sendup_constructor(void *buf
, void *arg
, int flags
);
251 static void fcip_sendup_thr(void *arg
);
252 static int fcip_sendup_alloc_enque(struct fcip
*ftpr
, mblk_t
*mp
,
253 struct fcipstr
*(*f
)());
256 * zero copy inbound data handling
259 static void fcip_ubfree(char *arg
);
260 #endif /* FCIP_ESBALLOC */
262 #if !defined(FCIP_ESBALLOC)
263 static void *fcip_allocb(size_t size
, uint_t pri
);
267 /* FCIP FARP support functions */
268 static struct fcip_dest
*fcip_do_farp(struct fcip
*fptr
, la_wwn_t
*pwwn
,
269 char *ip_addr
, size_t ip_addr_len
, int flags
);
270 static void fcip_init_broadcast_pkt(fcip_pkt_t
*fcip_pkt
, void (*comp
) (),
272 static int fcip_handle_farp_request(struct fcip
*fptr
, la_els_farp_t
*fcmd
);
273 static int fcip_handle_farp_response(struct fcip
*fptr
, la_els_farp_t
*fcmd
);
274 static void fcip_cache_arp_broadcast(struct fcip
*ftpr
, fc_unsol_buf_t
*buf
);
275 static void fcip_port_ns(void *arg
);
279 #include <sys/debug.h>
281 #define FCIP_DEBUG_DEFAULT 0x1
282 #define FCIP_DEBUG_ATTACH 0x2
283 #define FCIP_DEBUG_INIT 0x4
284 #define FCIP_DEBUG_DETACH 0x8
285 #define FCIP_DEBUG_DLPI 0x10
286 #define FCIP_DEBUG_ELS 0x20
287 #define FCIP_DEBUG_DOWNSTREAM 0x40
288 #define FCIP_DEBUG_UPSTREAM 0x80
289 #define FCIP_DEBUG_MISC 0x100
291 #define FCIP_DEBUG_STARTUP (FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT)
292 #define FCIP_DEBUG_DATAOUT (FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM)
293 #define FCIP_DEBUG_DATAIN (FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM)
295 static int fcip_debug
= FCIP_DEBUG_DEFAULT
;
297 #define FCIP_DEBUG(level, args) \
298 if (fcip_debug & (level)) cmn_err args;
302 #define FCIP_DEBUG(level, args) /* do nothing */
306 #define KIOIP KSTAT_INTR_PTR(fcip->fcip_intrstats)
309 * Endian independent ethernet to WWN copy
311 #define ether_to_wwn(E, W) \
312 bzero((void *)(W), sizeof (la_wwn_t)); \
313 bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \
314 (W)->raw_wwn[0] |= 0x10
317 * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr.
318 * The args to the macro are pointers to WWN and ether_addr structures
320 #define wwn_to_ether(W, E) \
321 bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL)
324 * The module_info structure contains identification and limit values.
325 * All queues associated with a certain driver share the same module_info
326 * structures. This structure defines the characteristics of that driver/
327 * module's queues. The module name must be unique. The max and min packet
328 * sizes limit the no. of characters in M_DATA messages. The Hi and Lo
329 * water marks are for flow control when a module has a service procedure.
331 static struct module_info fcipminfo
= {
332 FCIPIDNUM
, /* mi_idnum : Module ID num */
333 FCIPNAME
, /* mi_idname: Module Name */
334 FCIPMINPSZ
, /* mi_minpsz: Min packet size */
335 FCIPMAXPSZ
, /* mi_maxpsz: Max packet size */
336 FCIPHIWAT
, /* mi_hiwat : High water mark */
337 FCIPLOWAT
/* mi_lowat : Low water mark */
341 * The qinit structures contain the module put, service, open and close
342 * procedure pointers. All modules and drivers with the same streamtab
343 * file (i.e same fmodsw or cdevsw entry points) point to the same
344 * upstream (read) and downstream (write) qinit structs.
346 static struct qinit fcip_rinit
= {
349 fcip_open
, /* qi_qopen */
350 fcip_close
, /* qi_qclose */
351 NULL
, /* qi_qadmin */
352 &fcipminfo
, /* qi_minfo */
356 static struct qinit fcip_winit
= {
357 fcip_wput
, /* qi_putp */
358 fcip_wsrv
, /* qi_srvp */
360 NULL
, /* qi_qclose */
361 NULL
, /* qi_qadmin */
362 &fcipminfo
, /* qi_minfo */
367 * streamtab contains pointers to the read and write qinit structures
370 static struct streamtab fcip_info
= {
371 &fcip_rinit
, /* st_rdinit */
372 &fcip_winit
, /* st_wrinit */
373 NULL
, /* st_muxrinit */
374 NULL
, /* st_muxwrinit */
377 static struct cb_ops fcip_cb_ops
= {
380 nodev
, /* strategy */
390 ddi_prop_op
, /* cb_prop_op */
391 &fcip_info
, /* streamtab */
392 D_MP
| D_HOTPLUG
, /* Driver compatibility flag */
394 nodev
, /* int (*cb_aread)() */
395 nodev
/* int (*cb_awrite)() */
399 * autoconfiguration routines.
401 static struct dev_ops fcip_ops
= {
402 DEVO_REV
, /* devo_rev, */
404 fcip_getinfo
, /* info */
405 nulldev
, /* identify */
407 fcip_attach
, /* attach */
408 fcip_detach
, /* detach */
410 &fcip_cb_ops
, /* driver operations */
411 NULL
, /* bus operations */
412 ddi_power
/* power management */
415 #define FCIP_VERSION "1.61"
416 #define FCIP_NAME "SunFC FCIP v" FCIP_VERSION
418 #define PORT_DRIVER "fp"
420 #define GETSTRUCT(struct, number) \
421 ((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
424 static struct modldrv modldrv
= {
425 &mod_driverops
, /* Type of module - driver */
426 FCIP_NAME
, /* Name of module */
427 &fcip_ops
, /* driver ops */
430 static struct modlinkage modlinkage
= {
431 MODREV_1
, (void *)&modldrv
, NULL
436 * Now for some global statics
438 static uint32_t fcip_ub_nbufs
= FCIP_UB_NBUFS
;
439 static uint32_t fcip_ub_size
= FCIP_UB_SIZE
;
440 static int fcip_pkt_ttl_ticks
= FCIP_PKT_TTL
;
441 static int fcip_tick_incr
= 1;
442 static int fcip_wait_cmds
= FCIP_WAIT_CMDS
;
443 static int fcip_num_attaching
= 0;
444 static int fcip_port_attach_pending
= 0;
445 static int fcip_create_nodes_on_demand
= 1; /* keep it similar to fcp */
446 static int fcip_cache_on_arp_broadcast
= 0;
447 static int fcip_farp_supported
= 0;
448 static int fcip_minor_node_created
= 0;
453 #define QLC_PORT_1_ID_BITS 0x100
454 #define QLC_PORT_2_ID_BITS 0x101
455 #define QLC_PORT_NAA 0x2
456 #define QLC_MODULE_NAME "qlc"
457 #define IS_QLC_PORT(port_dip) \
458 (strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
459 QLC_MODULE_NAME) == 0)
463 * fcip softstate structures head.
466 static void *fcip_softp
= NULL
;
469 * linked list of active (inuse) driver streams
472 static int fcip_num_instances
= 0;
473 static dev_info_t
*fcip_module_dip
= NULL
;
477 * Ethernet broadcast address: Broadcast addressing in IP over fibre
478 * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
480 * The broadcast addressing varies for differing topologies a node may be in:
481 * - On a private loop the ARP broadcast is a class 3 sequence sent
482 * using OPNfr (Open Broadcast Replicate primitive) followed by
483 * the ARP frame to D_ID 0xFFFFFF
485 * - On a public Loop the broadcast sequence is sent to AL_PA 0x00
486 * (no OPNfr primitive).
488 * - For direct attach and point to point topologies we just send
489 * the frame to D_ID 0xFFFFFF
491 * For public loop the handling would probably be different - for now
492 * I'll just declare this struct - It can be deleted if not necessary.
498 * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
499 * returned as a part of an DL_INFO_ACK message which is a M_PCPROTO message
500 * returned in response to a DL_INFO_REQ message sent to us from a DLS user
501 * Let us fake an ether header as much as possible.
503 * dl_addr_length is the Provider's DLSAP addr which is SAP addr +
504 * Physical addr of the provider. We set this to
505 * ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
506 * dl_mac_type Lets just use DL_ETHER - we can try using DL_IPFC, a new
507 * dlpi.h define later.
508 * dl_sap_length -2 indicating the SAP address follows the Physical addr
509 * component in the DLSAP addr.
510 * dl_service_mode: DLCLDS - connectionless data link service.
514 static dl_info_ack_t fcip_infoack
= {
515 DL_INFO_ACK
, /* dl_primitive */
516 FCIPMTU
, /* dl_max_sdu */
518 FCIPADDRL
, /* dl_addr_length */
519 DL_ETHER
, /* dl_mac_type */
521 0, /* dl_current_state */
522 -2, /* dl_sap_length */
523 DL_CLDLS
, /* dl_service_mode */
524 0, /* dl_qos_length */
525 0, /* dl_qos_offset */
526 0, /* dl_range_length */
527 0, /* dl_range_offset */
528 DL_STYLE2
, /* dl_provider_style */
529 sizeof (dl_info_ack_t
), /* dl_addr_offset */
530 DL_VERSION_2
, /* dl_version */
531 ETHERADDRL
, /* dl_brdcst_addr_length */
532 sizeof (dl_info_ack_t
) + FCIPADDRL
, /* dl_brdcst_addr_offset */
537 * FCIP broadcast address definition.
539 static struct ether_addr fcipnhbroadcastaddr
= {
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
544 * RFC2625 requires the broadcast ARP address in the ARP data payload to
545 * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
547 static struct ether_addr fcip_arpbroadcast_addr
= {
548 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
552 #define ether_bcopy(src, dest) bcopy((src), (dest), ETHERADDRL);
555 * global kernel locks
557 static kcondvar_t fcip_global_cv
;
558 static kmutex_t fcip_global_mutex
;
561 * fctl external defines
563 extern int fc_ulp_add(fc_ulp_modinfo_t
*);
566 * fctl data structures
569 #define FCIP_REV 0x07
571 /* linked list of port info structures */
572 static fcip_port_info_t
*fcip_port_head
= NULL
;
574 /* linked list of fcip structures */
575 static struct fcipstr
*fcipstrup
= NULL
;
576 static krwlock_t fcipstruplock
;
580 * Module information structure. This structure gives the FC Transport modules
581 * information about an ULP that registers with it.
583 static fc_ulp_modinfo_t fcip_modinfo
= {
584 0, /* for xref checks? */
585 FCTL_ULP_MODREV_4
, /* FCIP revision */
586 FC_TYPE_IS8802_SNAP
, /* type 5 for SNAP encapsulated datagrams */
587 FCIP_NAME
, /* module name as in the modldrv struct */
588 0x0, /* get all statec callbacks for now */
589 fcip_port_attach
, /* port attach callback */
590 fcip_port_detach
, /* port detach callback */
591 fcip_port_ioctl
, /* port ioctl callback */
592 fcip_els_cb
, /* els callback */
593 fcip_data_cb
, /* data callback */
594 fcip_statec_cb
/* state change callback */
599 * Solaris 9 and up, the /kernel/drv/fp.conf file will have the following entry
603 * This will ensure that fp is loaded at bootup. No additional checks are needed
613 * Initialize the mutexs used by port attach and other callbacks.
614 * The transport can call back into our port_attach_callback
615 * routine even before _init() completes and bad things can happen.
617 mutex_init(&fcip_global_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
618 cv_init(&fcip_global_cv
, NULL
, CV_DRIVER
, NULL
);
619 rw_init(&fcipstruplock
, NULL
, RW_DRIVER
, NULL
);
621 mutex_enter(&fcip_global_mutex
);
622 fcip_port_attach_pending
= 1;
623 mutex_exit(&fcip_global_mutex
);
626 * Now attempt to register fcip with the transport.
627 * If fc_ulp_add fails, fcip module will not be loaded.
629 rval
= fc_ulp_add(&fcip_modinfo
);
630 if (rval
!= FC_SUCCESS
) {
631 mutex_destroy(&fcip_global_mutex
);
632 cv_destroy(&fcip_global_cv
);
633 rw_destroy(&fcipstruplock
);
635 case FC_ULP_SAMEMODULE
:
636 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
637 "!fcip: module is already registered with"
641 case FC_ULP_SAMETYPE
:
642 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
643 "!fcip: Another module of the same ULP type 0x%x"
644 " is already registered with the transport",
645 fcip_modinfo
.ulp_type
));
649 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
650 "!fcip: Current fcip version 0x%x does not match"
652 fcip_modinfo
.ulp_rev
));
656 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
657 "!fcip: fc_ulp_add failed with status 0x%x", rval
));
661 FCIP_TNF_UNLOAD(&modlinkage
);
665 if ((rval
= ddi_soft_state_init(&fcip_softp
, sizeof (struct fcip
),
666 FCIP_NUM_INSTANCES
)) != 0) {
667 mutex_destroy(&fcip_global_mutex
);
668 cv_destroy(&fcip_global_cv
);
669 rw_destroy(&fcipstruplock
);
670 (void) fc_ulp_remove(&fcip_modinfo
);
671 FCIP_TNF_UNLOAD(&modlinkage
);
675 if ((rval
= mod_install(&modlinkage
)) != 0) {
676 FCIP_TNF_UNLOAD(&modlinkage
);
677 (void) fc_ulp_remove(&fcip_modinfo
);
678 mutex_destroy(&fcip_global_mutex
);
679 cv_destroy(&fcip_global_cv
);
680 rw_destroy(&fcipstruplock
);
681 ddi_soft_state_fini(&fcip_softp
);
687 * Unload the port driver if this was the only ULP loaded and then
688 * deregister with the transport.
697 * Do not permit the module to be unloaded before a port
698 * attach callback has happened.
700 mutex_enter(&fcip_global_mutex
);
701 if (fcip_num_attaching
|| fcip_port_attach_pending
) {
702 mutex_exit(&fcip_global_mutex
);
705 mutex_exit(&fcip_global_mutex
);
707 if ((rval
= mod_remove(&modlinkage
)) != 0) {
712 * unregister with the transport layer
714 rval1
= fc_ulp_remove(&fcip_modinfo
);
717 * If the ULP was not registered with the transport, init should
718 * have failed. If transport has no knowledge of our existence
719 * we should simply bail out and succeed
722 if (rval1
== FC_BADULP
) {
723 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
724 "fcip: ULP was never registered with the transport"));
726 } else if (rval1
== FC_BADTYPE
) {
727 FCIP_DEBUG(FCIP_DEBUG_DEFAULT
, (CE_WARN
,
728 "fcip: No ULP of this type 0x%x was registered with "
729 "transport", fcip_modinfo
.ulp_type
));
734 mutex_destroy(&fcip_global_mutex
);
735 rw_destroy(&fcipstruplock
);
736 cv_destroy(&fcip_global_cv
);
737 ddi_soft_state_fini(&fcip_softp
);
739 FCIP_TNF_UNLOAD(&modlinkage
);
745 * Info about this loadable module
748 _info(struct modinfo
*modinfop
)
750 return (mod_info(&modlinkage
, modinfop
));
754 * The port attach callback is invoked by the port driver when a FCA
755 * port comes online and binds with the transport layer. The transport
756 * then callsback into all ULP modules registered with it. The Port attach
757 * call back will also provide the ULP module with the Port's WWN and S_ID
761 fcip_port_attach(opaque_t ulp_handle
, fc_ulp_port_info_t
*port_info
,
762 fc_attach_cmd_t cmd
, uint32_t sid
)
764 int rval
= FC_FAILURE
;
767 fcip_port_info_t
*fport
= NULL
;
768 fcip_port_info_t
*cur_fport
;
772 case FC_CMD_ATTACH
: {
773 la_wwn_t
*ww_pn
= NULL
;
775 * It was determined that, as per spec, the lower 48 bits of
776 * the port-WWN will always be unique. This will make the MAC
777 * address (i.e the lower 48 bits of the WWN), that IP/ARP
778 * depend on, unique too. Hence we should be able to remove the
779 * restriction of attaching to only one of the ports of
782 * Earlier, fcip used to attach only to qlc module and fail
783 * silently for attach failures resulting from unknown FCAs or
784 * unsupported FCA ports. Now, we'll do no such checks.
786 ww_pn
= &port_info
->port_pwwn
;
788 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_NOTE
,
789 "port id bits: 0x%x", ww_pn
->w
.nport_id
));
791 * A port has come online
793 mutex_enter(&fcip_global_mutex
);
794 fcip_num_instances
++;
795 fcip_num_attaching
++;
797 if (fcip_port_head
== NULL
) {
798 /* OK to sleep here ? */
799 fport
= kmem_zalloc(sizeof (fcip_port_info_t
),
802 fcip_num_instances
--;
803 fcip_num_attaching
--;
804 ASSERT(fcip_num_attaching
>= 0);
805 mutex_exit(&fcip_global_mutex
);
807 cmn_err(CE_WARN
, "!fcip(%d): port attach "
808 "failed: alloc failed",
809 ddi_get_instance(port_info
->port_dip
));
812 fcip_port_head
= fport
;
815 * traverse the port list and also check for
816 * duplicate port attaches - Nothing wrong in being
819 cur_fport
= fcip_port_head
;
820 while (cur_fport
!= NULL
) {
821 if (cur_fport
->fcipp_handle
==
822 port_info
->port_handle
) {
823 fcip_num_instances
--;
824 fcip_num_attaching
--;
825 ASSERT(fcip_num_attaching
>= 0);
826 mutex_exit(&fcip_global_mutex
);
827 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_WARN
,
828 "!fcip(%d): port already "
829 "attached!!", ddi_get_instance(
830 port_info
->port_dip
)));
834 cur_fport
= cur_fport
->fcipp_next
;
836 fport
= kmem_zalloc(sizeof (fcip_port_info_t
),
840 fcip_num_instances
--;
841 fcip_num_attaching
--;
842 ASSERT(fcip_num_attaching
>= 0);
843 mutex_exit(&fcip_global_mutex
);
844 cmn_err(CE_WARN
, "!fcip(%d): port attach "
845 "failed: alloc failed",
846 ddi_get_instance(port_info
->port_dip
));
849 fport
->fcipp_next
= fcip_port_head
;
850 fcip_port_head
= fport
;
853 mutex_exit(&fcip_global_mutex
);
856 * now fill in the details about the port itself
858 fport
->fcipp_linkage
= *port_info
->port_linkage
;
859 fport
->fcipp_handle
= port_info
->port_handle
;
860 fport
->fcipp_dip
= port_info
->port_dip
;
861 fport
->fcipp_topology
= port_info
->port_flags
;
862 fport
->fcipp_pstate
= port_info
->port_state
;
863 fport
->fcipp_naa
= port_info
->port_pwwn
.w
.naa_id
;
864 bcopy(&port_info
->port_pwwn
, &fport
->fcipp_pwwn
,
866 bcopy(&port_info
->port_nwwn
, &fport
->fcipp_nwwn
,
868 fport
->fcipp_fca_pkt_size
= port_info
->port_fca_pkt_size
;
869 fport
->fcipp_cmd_dma_attr
= *port_info
->port_cmd_dma_attr
;
870 fport
->fcipp_resp_dma_attr
= *port_info
->port_resp_dma_attr
;
871 fport
->fcipp_fca_acc_attr
= *port_info
->port_acc_attr
;
872 src_id
.port_id
= sid
;
873 src_id
.priv_lilp_posit
= 0;
874 fport
->fcipp_sid
= src_id
;
877 * allocate soft state for this instance
879 instance
= ddi_get_instance(fport
->fcipp_dip
);
880 if (ddi_soft_state_zalloc(fcip_softp
,
881 instance
) != DDI_SUCCESS
) {
883 cmn_err(CE_WARN
, "!fcip(%d): port attach failed: "
884 "soft state alloc failed", instance
);
888 fptr
= ddi_get_soft_state(fcip_softp
, instance
);
892 cmn_err(CE_WARN
, "!fcip(%d): port attach failed: "
893 "failure to get soft state", instance
);
898 * initialize all mutexes and locks required for this module
900 mutex_init(&fptr
->fcip_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
901 mutex_init(&fptr
->fcip_ub_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
902 mutex_init(&fptr
->fcip_rt_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
903 mutex_init(&fptr
->fcip_dest_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
904 mutex_init(&fptr
->fcip_sendup_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
905 cv_init(&fptr
->fcip_farp_cv
, NULL
, CV_DRIVER
, NULL
);
906 cv_init(&fptr
->fcip_sendup_cv
, NULL
, CV_DRIVER
, NULL
);
907 cv_init(&fptr
->fcip_ub_cv
, NULL
, CV_DRIVER
, NULL
);
909 mutex_enter(&fptr
->fcip_mutex
);
911 fptr
->fcip_dip
= fport
->fcipp_dip
; /* parent's dip */
912 fptr
->fcip_instance
= instance
;
913 fptr
->fcip_ub_upstream
= 0;
915 if (FC_PORT_STATE_MASK(port_info
->port_state
) ==
917 fptr
->fcip_port_state
= FCIP_PORT_ONLINE
;
918 if (fptr
->fcip_flags
& FCIP_LINK_DOWN
) {
919 fptr
->fcip_flags
&= ~FCIP_LINK_DOWN
;
922 fptr
->fcip_port_state
= FCIP_PORT_OFFLINE
;
925 fptr
->fcip_flags
|= FCIP_ATTACHING
;
926 fptr
->fcip_port_info
= fport
;
929 * Extract our MAC addr from our port's WWN. The lower 48
930 * bits will be our MAC address
932 wwn_to_ether(&fport
->fcipp_nwwn
, &fptr
->fcip_macaddr
);
934 fport
->fcipp_fcip
= fptr
;
936 FCIP_DEBUG(FCIP_DEBUG_ATTACH
,
937 (CE_NOTE
, "fcipdest : 0x%lx, rtable : 0x%lx",
938 (long)(sizeof (fptr
->fcip_dest
)),
939 (long)(sizeof (fptr
->fcip_rtable
))));
941 bzero(fptr
->fcip_dest
, sizeof (fptr
->fcip_dest
));
942 bzero(fptr
->fcip_rtable
, sizeof (fptr
->fcip_rtable
));
945 * create a taskq to handle sundry jobs for the driver
946 * This way we can have jobs run in parallel
948 fptr
->fcip_tq
= taskq_create("fcip_tasks",
949 FCIP_NUM_THREADS
, MINCLSYSPRI
, FCIP_MIN_TASKS
,
950 FCIP_MAX_TASKS
, TASKQ_PREPOPULATE
);
952 mutex_exit(&fptr
->fcip_mutex
);
955 * create a separate thread to handle all unsolicited
956 * callback handling. This is because unsolicited_callback
957 * can happen from an interrupt context and the upstream
958 * modules can put new messages right back in the same
959 * thread context. This usually works fine, but sometimes
960 * we may have to block to obtain the dest struct entries
961 * for some remote ports.
963 mutex_enter(&fptr
->fcip_sendup_mutex
);
964 if (thread_create(NULL
, DEFAULTSTKSZ
,
965 (void (*)())fcip_sendup_thr
, (caddr_t
)fptr
, 0, &p0
,
966 TS_RUN
, minclsyspri
) == NULL
) {
967 mutex_exit(&fptr
->fcip_sendup_mutex
);
969 "!unable to create fcip sendup thread for "
970 " instance: 0x%x", instance
);
974 fptr
->fcip_sendup_thr_initted
= 1;
975 fptr
->fcip_sendup_head
= fptr
->fcip_sendup_tail
= NULL
;
976 mutex_exit(&fptr
->fcip_sendup_mutex
);
979 /* Let the attach handler do the rest */
980 if (fcip_port_attach_handler(fptr
) != FC_SUCCESS
) {
982 * We have already cleaned up so return
985 cmn_err(CE_WARN
, "!fcip(%d): port attach failed",
990 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_CONT
,
991 "!fcip attach for port instance (0x%x) successful",
997 case FC_CMD_POWER_UP
:
1000 mutex_enter(&fcip_global_mutex
);
1001 fport
= fcip_port_head
;
1002 while (fport
!= NULL
) {
1003 if (fport
->fcipp_handle
== port_info
->port_handle
) {
1006 fport
= fport
->fcipp_next
;
1008 if (fport
== NULL
) {
1010 mutex_exit(&fcip_global_mutex
);
1013 rval
= fcip_handle_resume(fport
, port_info
, cmd
);
1014 mutex_exit(&fcip_global_mutex
);
1018 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_WARN
,
1019 "unknown cmd type 0x%x in port_attach", cmd
));
1026 mutex_enter(&fcip_global_mutex
);
1027 fcip_num_attaching
--;
1028 ASSERT(fcip_num_attaching
>= 0);
1029 (void) fcip_softstate_free(fport
);
1030 fcip_port_attach_pending
= 0;
1031 mutex_exit(&fcip_global_mutex
);
1036 mutex_enter(&fcip_global_mutex
);
1037 fcip_port_attach_pending
= 0;
1038 mutex_exit(&fcip_global_mutex
);
1043 * fcip_port_attach_handler : Completes the port attach operation after
1044 * the ulp_port_attach routine has completed its ground work. The job
1045 * of this function among other things is to obtain and handle topology
1046 * specifics, initialize a port, setup broadcast address entries in
1047 * the fcip tables etc. This routine cleans up behind itself on failures.
1048 * Returns FC_SUCCESS or FC_FAILURE.
1051 fcip_port_attach_handler(struct fcip
*fptr
)
1053 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
1054 int rval
= FC_FAILURE
;
1056 ASSERT(fport
!= NULL
);
1058 mutex_enter(&fcip_global_mutex
);
1060 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_NOTE
,
1061 "fcip module dip: %p instance: %d",
1062 (void *)fcip_module_dip
, ddi_get_instance(fptr
->fcip_dip
)));
1064 if (fcip_module_dip
== NULL
) {
1067 fcip_lbolt
= ddi_get_lbolt();
1069 * we need to use the fcip devinfo for creating
1070 * the clone device node, but the fcip attach
1071 * (from its conf file entry claiming to be a
1072 * child of pseudo) may not have happened yet.
1073 * wait here for 10 seconds and fail port attach
1074 * if the fcip devinfo is not attached yet
1076 fcip_lbolt
+= drv_usectohz(FCIP_INIT_DELAY
);
1078 FCIP_DEBUG(FCIP_DEBUG_ATTACH
,
1079 (CE_WARN
, "cv_timedwait lbolt %lx", fcip_lbolt
));
1081 (void) cv_timedwait(&fcip_global_cv
, &fcip_global_mutex
,
1084 if (fcip_module_dip
== NULL
) {
1085 mutex_exit(&fcip_global_mutex
);
1087 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_WARN
,
1088 "fcip attach did not happen"));
1089 goto port_attach_cleanup
;
1093 if ((!fcip_minor_node_created
) &&
1094 fcip_is_supported_fc_topology(fport
->fcipp_topology
)) {
1096 * Checking for same topologies which are considered valid
1097 * by fcip_handle_topology(). Dont create a minor node if
1098 * nothing is hanging off the FC port.
1100 if (ddi_create_minor_node(fcip_module_dip
, "fcip", S_IFCHR
,
1101 ddi_get_instance(fptr
->fcip_dip
), DDI_PSEUDO
,
1102 CLONE_DEV
) == DDI_FAILURE
) {
1103 mutex_exit(&fcip_global_mutex
);
1104 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_WARN
,
1105 "failed to create minor node for fcip(%d)",
1106 ddi_get_instance(fptr
->fcip_dip
)));
1107 goto port_attach_cleanup
;
1109 fcip_minor_node_created
++;
1111 mutex_exit(&fcip_global_mutex
);
1114 * initialize port for traffic
1116 if (fcip_init_port(fptr
) != FC_SUCCESS
) {
1117 /* fcip_init_port has already cleaned up its stuff */
1119 mutex_enter(&fcip_global_mutex
);
1121 if ((fcip_num_instances
== 1) &&
1122 (fcip_minor_node_created
== 1)) {
1123 /* Remove minor node iff this is the last instance */
1124 ddi_remove_minor_node(fcip_module_dip
, NULL
);
1127 mutex_exit(&fcip_global_mutex
);
1129 goto port_attach_cleanup
;
1132 mutex_enter(&fptr
->fcip_mutex
);
1133 fptr
->fcip_flags
&= ~FCIP_ATTACHING
;
1134 fptr
->fcip_flags
|= FCIP_INITED
;
1135 fptr
->fcip_timeout_ticks
= 0;
1138 * start the timeout threads
1140 fptr
->fcip_timeout_id
= timeout(fcip_timeout
, fptr
,
1141 drv_usectohz(1000000));
1143 mutex_exit(&fptr
->fcip_mutex
);
1144 mutex_enter(&fcip_global_mutex
);
1145 fcip_num_attaching
--;
1146 ASSERT(fcip_num_attaching
>= 0);
1147 mutex_exit(&fcip_global_mutex
);
1151 port_attach_cleanup
:
1152 mutex_enter(&fcip_global_mutex
);
1153 (void) fcip_softstate_free(fport
);
1154 fcip_num_attaching
--;
1155 ASSERT(fcip_num_attaching
>= 0);
1156 mutex_exit(&fcip_global_mutex
);
1163 * Handler for DDI_RESUME operations. Port must be ready to restart IP
1167 fcip_handle_resume(fcip_port_info_t
*fport
, fc_ulp_port_info_t
*port_info
,
1168 fc_attach_cmd_t cmd
)
1170 int rval
= FC_SUCCESS
;
1171 struct fcip
*fptr
= fport
->fcipp_fcip
;
1172 struct fcipstr
*tslp
;
1176 ASSERT(fptr
!= NULL
);
1178 mutex_enter(&fptr
->fcip_mutex
);
1180 if (cmd
== FC_CMD_POWER_UP
) {
1181 fptr
->fcip_flags
&= ~(FCIP_POWER_DOWN
);
1182 if (fptr
->fcip_flags
& FCIP_SUSPENDED
) {
1183 mutex_exit(&fptr
->fcip_mutex
);
1184 return (FC_SUCCESS
);
1186 } else if (cmd
== FC_CMD_RESUME
) {
1187 fptr
->fcip_flags
&= ~(FCIP_SUSPENDED
);
1189 mutex_exit(&fptr
->fcip_mutex
);
1190 return (FC_FAILURE
);
1194 * set the current port state and topology
1196 fport
->fcipp_topology
= port_info
->port_flags
;
1197 fport
->fcipp_pstate
= port_info
->port_state
;
1199 rw_enter(&fcipstruplock
, RW_READER
);
1200 for (tslp
= fcipstrup
; tslp
; tslp
= tslp
->sl_nextp
) {
1201 if (tslp
->sl_fcip
== fptr
) {
1205 rw_exit(&fcipstruplock
);
1208 * No active streams on this port
1215 mutex_enter(&fptr
->fcip_rt_mutex
);
1216 for (index
= 0; index
< FCIP_RT_HASH_ELEMS
; index
++) {
1217 struct fcip_routing_table
*frp
;
1219 frp
= fptr
->fcip_rtable
[index
];
1223 * Mark the broadcast RTE available again. It
1224 * was marked SUSPENDED during SUSPEND.
1226 did
= fcip_get_broadcast_did(fptr
);
1227 if (frp
->fcipr_d_id
.port_id
== did
) {
1228 frp
->fcipr_state
= 0;
1229 index
= FCIP_RT_HASH_ELEMS
;
1232 frp
= frp
->fcipr_next
;
1235 mutex_exit(&fptr
->fcip_rt_mutex
);
1238 * fcip_handle_topology will update the port entries in the
1240 * fcip_handle_topology also takes care of resetting the
1241 * fcipr_state field in the routing table structure. The entries
1242 * were set to RT_INVALID during suspend.
1244 fcip_handle_topology(fptr
);
1248 * Restart the timeout thread
1250 fptr
->fcip_timeout_id
= timeout(fcip_timeout
, fptr
,
1251 drv_usectohz(1000000));
1252 mutex_exit(&fptr
->fcip_mutex
);
1258 * Insert a destination port entry into the routing table for
1262 fcip_rt_update(struct fcip
*fptr
, fc_portmap_t
*devlist
, uint32_t listlen
)
1264 struct fcip_routing_table
*frp
;
1265 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
1270 ASSERT(!mutex_owned(&fptr
->fcip_mutex
));
1271 mutex_enter(&fptr
->fcip_rt_mutex
);
1273 for (i
= 0; i
< listlen
; i
++) {
1274 pmap
= &(devlist
[i
]);
1276 frp
= fcip_lookup_rtable(fptr
, &(pmap
->map_pwwn
),
1279 * If an entry for a port in the devlist exists in the
1280 * in the per port routing table, make sure the data
1281 * is current. We need to do this irrespective of the
1282 * underlying port topology.
1284 switch (pmap
->map_type
) {
1286 case PORT_DEVICE_NOCHANGE
:
1288 case PORT_DEVICE_USER_LOGIN
:
1290 case PORT_DEVICE_CHANGED
:
1292 case PORT_DEVICE_NEW
:
1301 case PORT_DEVICE_OLD
:
1303 case PORT_DEVICE_USER_LOGOUT
:
1305 * Mark entry for removal from Routing Table if
1306 * one exists. Let the timeout thread actually
1307 * remove the entry after we've given up hopes
1308 * of the port ever showing up.
1314 * Mark the routing table as invalid to bail
1315 * the packets early that are in transit
1317 did
= fptr
->fcip_broadcast_did
;
1318 if (frp
->fcipr_d_id
.port_id
!= did
) {
1319 frp
->fcipr_pd
= NULL
;
1320 frp
->fcipr_state
= FCIP_RT_INVALID
;
1321 frp
->fcipr_invalid_timeout
=
1322 fptr
->fcip_timeout_ticks
+
1329 FCIP_DEBUG(FCIP_DEBUG_INIT
, (CE_WARN
,
1330 "unknown map flags in rt_update"));
1334 ASSERT(frp
== NULL
);
1335 hash_bucket
= FCIP_RT_HASH(pmap
->map_pwwn
.raw_wwn
);
1337 ASSERT(hash_bucket
< FCIP_RT_HASH_ELEMS
);
1339 frp
= (struct fcip_routing_table
*)
1340 kmem_zalloc(sizeof (struct fcip_routing_table
), KM_SLEEP
);
1341 /* insert at beginning of hash bucket */
1342 frp
->fcipr_next
= fptr
->fcip_rtable
[hash_bucket
];
1343 fptr
->fcip_rtable
[hash_bucket
] = frp
;
1344 fc_wwn_to_str(&pmap
->map_pwwn
, wwn_buf
);
1345 FCIP_DEBUG(FCIP_DEBUG_ATTACH
, (CE_NOTE
,
1346 "added entry for pwwn %s and d_id 0x%x",
1347 wwn_buf
, pmap
->map_did
.port_id
));
1349 bcopy((void *)&pmap
->map_pwwn
,
1350 (void *)&frp
->fcipr_pwwn
, sizeof (la_wwn_t
));
1351 bcopy((void *)&pmap
->map_nwwn
, (void *)&frp
->fcipr_nwwn
,
1353 frp
->fcipr_d_id
= pmap
->map_did
;
1354 frp
->fcipr_state
= pmap
->map_state
;
1355 frp
->fcipr_pd
= pmap
->map_pd
;
1358 * If there is no pd for a destination port that is not
1359 * a broadcast entry, the port is pretty much unusable - so
1360 * mark the port for removal so we can try adding back the
1363 if ((frp
->fcipr_pd
== NULL
) &&
1364 (frp
->fcipr_d_id
.port_id
!= fptr
->fcip_broadcast_did
)) {
1365 frp
->fcipr_state
= PORT_DEVICE_INVALID
;
1366 frp
->fcipr_invalid_timeout
= fptr
->fcip_timeout_ticks
+
1367 (FCIP_RTE_TIMEOUT
/ 2);
1369 frp
->fcipr_fca_dev
=
1370 fc_ulp_get_fca_device(fport
->fcipp_handle
, pmap
->map_did
);
1373 * login to the remote port. Don't worry about
1374 * plogi failures for now
1376 if (pmap
->map_pd
!= NULL
) {
1377 (void) fcip_do_plogi(fptr
, frp
);
1378 } else if (FC_TOP_EXTERNAL(fport
->fcipp_topology
)) {
1379 fc_wwn_to_str(&frp
->fcipr_pwwn
, wwn_buf
);
1380 FCIP_DEBUG(FCIP_DEBUG_MISC
, (CE_NOTE
,
1381 "logging into pwwn %s, d_id 0x%x",
1382 wwn_buf
, frp
->fcipr_d_id
.port_id
));
1383 (void) fcip_do_plogi(fptr
, frp
);
1386 FCIP_TNF_BYTE_ARRAY(fcip_rt_update
, "fcip io", "detail",
1387 "new wwn in rt", pwwn
,
1388 &frp
->fcipr_pwwn
, sizeof (la_wwn_t
));
1390 mutex_exit(&fptr
->fcip_rt_mutex
);
1395 * return a matching routing table entry for a given fcip instance
1397 struct fcip_routing_table
*
1398 fcip_lookup_rtable(struct fcip
*fptr
, la_wwn_t
*wwn
, int matchflag
)
1400 struct fcip_routing_table
*frp
= NULL
;
1404 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable
, "fcip io", "detail",
1405 "rtable lookup for", wwn
,
1406 &wwn
->raw_wwn
, sizeof (la_wwn_t
));
1408 ASSERT(mutex_owned(&fptr
->fcip_rt_mutex
));
1410 hash_bucket
= FCIP_RT_HASH(wwn
->raw_wwn
);
1411 frp
= fptr
->fcip_rtable
[hash_bucket
];
1412 while (frp
!= NULL
) {
1414 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable
, "fcip io", "detail",
1415 "rtable entry", nwwn
,
1416 &(frp
->fcipr_nwwn
.raw_wwn
), sizeof (la_wwn_t
));
1418 if (fcip_wwn_compare(&frp
->fcipr_pwwn
, wwn
, matchflag
) == 0) {
1422 frp
= frp
->fcipr_next
;
1428 * Attach of fcip under pseudo. The actual setup of the interface
1429 * actually happens in fcip_port_attach on a callback from the
1430 * transport. The port_attach callback however can proceed only
1431 * after the devinfo for fcip has been created under pseudo
1434 fcip_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
1439 ASSERT(fcip_module_dip
== NULL
);
1440 fcip_module_dip
= dip
;
1443 * this call originates as a result of fcip's conf
1444 * file entry and will result in a fcip instance being
1445 * a child of pseudo. We should ensure here that the port
1446 * driver (fp) has been loaded and initted since we would
1447 * never get a port attach callback without fp being loaded.
1448 * If we are unable to succesfully load and initalize fp -
1449 * just fail this attach.
1451 mutex_enter(&fcip_global_mutex
);
1453 FCIP_DEBUG(FCIP_DEBUG_ATTACH
,
1454 (CE_WARN
, "global cv - signaling"));
1456 cv_signal(&fcip_global_cv
);
1458 FCIP_DEBUG(FCIP_DEBUG_ATTACH
,
1459 (CE_WARN
, "global cv - signaled"));
1460 mutex_exit(&fcip_global_mutex
);
1461 return (DDI_SUCCESS
);
1465 * Resume appears trickier
1467 return (DDI_SUCCESS
);
1469 return (DDI_FAILURE
);
1475 * The detach entry point to permit unloading fcip. We make sure
1476 * there are no active streams before we proceed with the detach
1480 fcip_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
1483 fcip_port_info_t
*fport
;
1489 * If we got here, any active streams should have been
1490 * unplumbed but check anyway
1492 mutex_enter(&fcip_global_mutex
);
1493 if (fcipstrup
!= NULL
) {
1494 mutex_exit(&fcip_global_mutex
);
1495 return (DDI_FAILURE
);
1498 if (fcip_port_head
!= NULL
) {
1500 * Check to see if we have unattached/unbound
1501 * ports. If all the ports are unattached/unbound go
1502 * ahead and unregister with the transport
1504 fport
= fcip_port_head
;
1505 while (fport
!= NULL
) {
1506 fptr
= fport
->fcipp_fcip
;
1510 mutex_enter(&fptr
->fcip_mutex
);
1511 fptr
->fcip_flags
|= FCIP_DETACHING
;
1512 if (fptr
->fcip_ipq
||
1513 fptr
->fcip_flags
& (FCIP_IN_TIMEOUT
|
1514 FCIP_IN_CALLBACK
| FCIP_ATTACHING
|
1515 FCIP_SUSPENDED
| FCIP_POWER_DOWN
|
1516 FCIP_REG_INPROGRESS
)) {
1517 mutex_exit(&fptr
->fcip_mutex
);
1518 FCIP_DEBUG(FCIP_DEBUG_DETACH
, (CE_WARN
,
1519 "fcip instance busy"));
1523 * Check for any outstanding pkts. If yes
1526 mutex_enter(&fptr
->fcip_dest_mutex
);
1527 if (fcip_port_get_num_pkts(fptr
) > 0) {
1528 mutex_exit(&fptr
->fcip_dest_mutex
);
1529 mutex_exit(&fptr
->fcip_mutex
);
1530 FCIP_DEBUG(FCIP_DEBUG_DETACH
, (CE_WARN
,
1531 "fcip instance busy - pkts "
1535 mutex_exit(&fptr
->fcip_dest_mutex
);
1537 mutex_enter(&fptr
->fcip_rt_mutex
);
1538 if (fcip_plogi_in_progress(fptr
)) {
1539 mutex_exit(&fptr
->fcip_rt_mutex
);
1540 mutex_exit(&fptr
->fcip_mutex
);
1541 FCIP_DEBUG(FCIP_DEBUG_DETACH
, (CE_WARN
,
1542 "fcip instance busy - plogi in "
1546 mutex_exit(&fptr
->fcip_rt_mutex
);
1548 mutex_exit(&fptr
->fcip_mutex
);
1549 fport
= fport
->fcipp_next
;
1552 * if fport is non NULL - we have active ports
1554 if (fport
!= NULL
) {
1556 * Remove the DETACHING flags on the ports
1558 fport
= fcip_port_head
;
1559 while (fport
!= NULL
) {
1560 fptr
= fport
->fcipp_fcip
;
1561 mutex_enter(&fptr
->fcip_mutex
);
1562 fptr
->fcip_flags
&= ~(FCIP_DETACHING
);
1563 mutex_exit(&fptr
->fcip_mutex
);
1564 fport
= fport
->fcipp_next
;
1566 mutex_exit(&fcip_global_mutex
);
1567 return (DDI_FAILURE
);
1572 * free up all softstate structures
1574 fport
= fcip_port_head
;
1575 while (fport
!= NULL
) {
1578 fptr
= fport
->fcipp_fcip
;
1580 mutex_enter(&fptr
->fcip_mutex
);
1582 * Check to see if somebody beat us to the
1585 detached
= fptr
->fcip_flags
& FCIP_DETACHED
;
1586 fptr
->fcip_flags
&= ~(FCIP_DETACHING
);
1587 fptr
->fcip_flags
|= FCIP_DETACHED
;
1588 mutex_exit(&fptr
->fcip_mutex
);
1592 fport
= fcip_softstate_free(fport
);
1595 * If the port was marked as detached
1596 * but it was still in the list, that
1597 * means another thread has marked it
1598 * but we got in while it released the
1599 * fcip_global_mutex in softstate_free.
1600 * Given that, we're still safe to use
1601 * fport->fcipp_next to find out what
1602 * the next port on the list is.
1604 fport
= fport
->fcipp_next
;
1607 FCIP_DEBUG(FCIP_DEBUG_DETACH
,
1608 (CE_NOTE
, "detaching port"));
1612 * If we haven't removed all the port structures, we
1613 * aren't yet ready to be detached.
1615 if (fcip_port_head
!= NULL
) {
1616 mutex_exit(&fcip_global_mutex
);
1617 return (DDI_FAILURE
);
1620 fcip_num_instances
= 0;
1621 mutex_exit(&fcip_global_mutex
);
1622 fcip_module_dip
= NULL
;
1623 return (DDI_SUCCESS
);
1626 return (DDI_SUCCESS
);
1628 return (DDI_FAILURE
);
1633 * The port_detach callback is called from the transport when a
1634 * FC port is being removed from the transport's control. This routine
1635 * provides fcip with an opportunity to cleanup all activities and
1636 * structures on the port marked for removal.
1640 fcip_port_detach(opaque_t ulp_handle
, fc_ulp_port_info_t
*port_info
,
1641 fc_detach_cmd_t cmd
)
1643 int rval
= FC_FAILURE
;
1644 fcip_port_info_t
*fport
;
1646 struct fcipstr
*strp
;
1649 case FC_CMD_DETACH
: {
1650 mutex_enter(&fcip_global_mutex
);
1652 if (fcip_port_head
== NULL
) {
1654 * we are all done but our fini has not been
1655 * called yet!! Let's hope we have no active
1656 * fcip instances here. - strange secnario but
1657 * no harm in having this return a success.
1659 fcip_check_remove_minor_node();
1661 mutex_exit(&fcip_global_mutex
);
1662 return (FC_SUCCESS
);
1665 * traverse the port list
1667 fport
= fcip_port_head
;
1668 while (fport
!= NULL
) {
1669 if (fport
->fcipp_handle
==
1670 port_info
->port_handle
) {
1671 fptr
= fport
->fcipp_fcip
;
1674 * Fail the port detach if there is
1675 * still an attached, bound stream on
1679 rw_enter(&fcipstruplock
, RW_READER
);
1681 for (strp
= fcipstrup
; strp
!= NULL
;
1682 strp
= strp
->sl_nextp
) {
1683 if (strp
->sl_fcip
== fptr
) {
1684 rw_exit(&fcipstruplock
);
1686 &fcip_global_mutex
);
1687 return (FC_FAILURE
);
1691 rw_exit(&fcipstruplock
);
1694 * fail port detach if we are in
1695 * the middle of a deferred port attach
1696 * or if the port has outstanding pkts
1699 mutex_enter(&fptr
->fcip_mutex
);
1700 if (fcip_check_port_busy
1707 &fcip_global_mutex
);
1708 return (FC_FAILURE
);
1713 mutex_exit(&fptr
->fcip_mutex
);
1715 (void) fcip_softstate_free(fport
);
1717 fcip_check_remove_minor_node();
1718 mutex_exit(&fcip_global_mutex
);
1719 return (FC_SUCCESS
);
1721 fport
= fport
->fcipp_next
;
1723 ASSERT(fport
== NULL
);
1725 mutex_exit(&fcip_global_mutex
);
1728 case FC_CMD_POWER_DOWN
:
1730 case FC_CMD_SUSPEND
:
1731 mutex_enter(&fcip_global_mutex
);
1732 fport
= fcip_port_head
;
1733 while (fport
!= NULL
) {
1734 if (fport
->fcipp_handle
== port_info
->port_handle
) {
1737 fport
= fport
->fcipp_next
;
1739 if (fport
== NULL
) {
1740 mutex_exit(&fcip_global_mutex
);
1743 rval
= fcip_handle_suspend(fport
, cmd
);
1744 mutex_exit(&fcip_global_mutex
);
1747 FCIP_DEBUG(FCIP_DEBUG_DETACH
,
1748 (CE_WARN
, "unknown port detach command!!"));
1756 * Returns 0 if the port is not busy, else returns non zero.
1759 fcip_check_port_busy(struct fcip
*fptr
)
1761 int rval
= 0, num_pkts
= 0;
1763 ASSERT(fptr
!= NULL
);
1764 ASSERT(MUTEX_HELD(&fptr
->fcip_mutex
));
1766 mutex_enter(&fptr
->fcip_dest_mutex
);
1768 if (fptr
->fcip_flags
& FCIP_PORT_BUSY
||
1769 ((num_pkts
= fcip_port_get_num_pkts(fptr
)) > 0) ||
1770 fptr
->fcip_num_ipkts_pending
) {
1772 FCIP_DEBUG(FCIP_DEBUG_DETACH
,
1773 (CE_NOTE
, "!fcip_check_port_busy: port is busy "
1774 "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!",
1775 fptr
->fcip_flags
, num_pkts
, fptr
->fcip_num_ipkts_pending
));
1778 mutex_exit(&fptr
->fcip_dest_mutex
);
1783 * Helper routine to remove fcip's minor node
1784 * There is one minor node per system and it should be removed if there are no
1785 * other fcip instances (which has a 1:1 mapping for fp instances) present
1788 fcip_check_remove_minor_node(void)
1790 ASSERT(MUTEX_HELD(&fcip_global_mutex
));
1793 * If there are no more fcip (fp) instances, remove the
1794 * minor node for fcip.
1795 * Reset fcip_minor_node_created to invalidate it.
1797 if (fcip_num_instances
== 0 && (fcip_module_dip
!= NULL
)) {
1798 ddi_remove_minor_node(fcip_module_dip
, NULL
);
1799 fcip_minor_node_created
= 0;
1804 * This routine permits the suspend operation during a CPR/System
1805 * power management operation. The routine basically quiesces I/Os
1806 * on all active interfaces
1809 fcip_handle_suspend(fcip_port_info_t
*fport
, fc_detach_cmd_t cmd
)
1811 struct fcip
*fptr
= fport
->fcipp_fcip
;
1816 struct fcipstr
*tslp
;
1819 ASSERT(fptr
!= NULL
);
1820 mutex_enter(&fptr
->fcip_mutex
);
1823 * Fail if we are in the middle of a callback. Don't use delay during
1824 * suspend since clock intrs are not available so busy wait
1827 while (count
++ < 15 &&
1828 ((fptr
->fcip_flags
& FCIP_IN_CALLBACK
) ||
1829 (fptr
->fcip_flags
& FCIP_IN_TIMEOUT
))) {
1830 mutex_exit(&fptr
->fcip_mutex
);
1831 drv_usecwait(1000000);
1832 mutex_enter(&fptr
->fcip_mutex
);
1835 if (fptr
->fcip_flags
& FCIP_IN_CALLBACK
||
1836 fptr
->fcip_flags
& FCIP_IN_TIMEOUT
) {
1837 mutex_exit(&fptr
->fcip_mutex
);
1838 return (FC_FAILURE
);
1841 if (cmd
== FC_CMD_POWER_DOWN
) {
1842 if (fptr
->fcip_flags
& FCIP_SUSPENDED
) {
1843 fptr
->fcip_flags
|= FCIP_POWER_DOWN
;
1844 mutex_exit(&fptr
->fcip_mutex
);
1847 fptr
->fcip_flags
|= FCIP_POWER_DOWN
;
1849 } else if (cmd
== FC_CMD_SUSPEND
) {
1850 fptr
->fcip_flags
|= FCIP_SUSPENDED
;
1852 mutex_exit(&fptr
->fcip_mutex
);
1853 return (FC_FAILURE
);
1856 mutex_exit(&fptr
->fcip_mutex
);
1858 * If no streams are plumbed - its the easiest case - Just
1859 * bail out without having to do much
1862 rw_enter(&fcipstruplock
, RW_READER
);
1863 for (tslp
= fcipstrup
; tslp
; tslp
= tslp
->sl_nextp
) {
1864 if (tslp
->sl_fcip
== fptr
) {
1868 rw_exit(&fcipstruplock
);
1871 * No active streams on this port
1878 * Walk through each Routing table structure and check if
1879 * the destination table has any outstanding commands. If yes
1880 * wait for the commands to drain. Since we go through each
1881 * routing table entry in succession, it may be wise to wait
1882 * only a few seconds for each entry.
1884 mutex_enter(&fptr
->fcip_rt_mutex
);
1888 for (index
= 0; index
< FCIP_RT_HASH_ELEMS
; index
++) {
1889 struct fcip_routing_table
*frp
;
1890 struct fcip_dest
*fdestp
;
1894 frp
= fptr
->fcip_rtable
[index
];
1897 * Mark the routing table as SUSPENDED. Even
1898 * mark the broadcast entry SUSPENDED to
1899 * prevent any ARP or other broadcasts. We
1900 * can reset the state of the broadcast
1901 * RTE when we resume.
1903 frp
->fcipr_state
= FCIP_RT_SUSPENDED
;
1904 pwwn
= &frp
->fcipr_pwwn
;
1907 * Get hold of destination pointer
1909 mutex_enter(&fptr
->fcip_dest_mutex
);
1911 hash_bucket
= FCIP_DEST_HASH(pwwn
->raw_wwn
);
1912 ASSERT(hash_bucket
< FCIP_DEST_HASH_ELEMS
);
1914 fdestp
= fptr
->fcip_dest
[hash_bucket
];
1915 while (fdestp
!= NULL
) {
1916 mutex_enter(&fdestp
->fcipd_mutex
);
1917 if (fdestp
->fcipd_rtable
) {
1918 if (fcip_wwn_compare(pwwn
,
1919 &fdestp
->fcipd_pwwn
,
1920 FCIP_COMPARE_PWWN
) == 0) {
1922 &fdestp
->fcipd_mutex
);
1926 mutex_exit(&fdestp
->fcipd_mutex
);
1927 fdestp
= fdestp
->fcipd_next
;
1930 mutex_exit(&fptr
->fcip_dest_mutex
);
1931 if (fdestp
== NULL
) {
1932 frp
= frp
->fcipr_next
;
1937 * Wait for fcip_wait_cmds seconds for
1938 * the commands to drain.
1941 mutex_enter(&fdestp
->fcipd_mutex
);
1942 while (fdestp
->fcipd_ncmds
&&
1943 count
< fcip_wait_cmds
) {
1944 mutex_exit(&fdestp
->fcipd_mutex
);
1945 mutex_exit(&fptr
->fcip_rt_mutex
);
1946 drv_usecwait(1000000);
1947 mutex_enter(&fptr
->fcip_rt_mutex
);
1948 mutex_enter(&fdestp
->fcipd_mutex
);
1952 * Check if we were able to drain all cmds
1953 * successfully. Else continue with other
1954 * ports and try during the second pass
1956 if (fdestp
->fcipd_ncmds
) {
1959 mutex_exit(&fdestp
->fcipd_mutex
);
1961 frp
= frp
->fcipr_next
;
1964 if (tryagain
== 0) {
1968 mutex_exit(&fptr
->fcip_rt_mutex
);
1971 mutex_enter(&fptr
->fcip_mutex
);
1972 fptr
->fcip_flags
&= ~(FCIP_SUSPENDED
| FCIP_POWER_DOWN
);
1973 mutex_exit(&fptr
->fcip_mutex
);
1974 return (FC_FAILURE
);
1978 mutex_enter(&fptr
->fcip_mutex
);
1979 tid
= fptr
->fcip_timeout_id
;
1980 fptr
->fcip_timeout_id
= NULL
;
1981 mutex_exit(&fptr
->fcip_mutex
);
1983 (void) untimeout(tid
);
1985 return (FC_SUCCESS
);
1989 * the getinfo(9E) entry point
1993 fcip_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **result
)
1995 int rval
= DDI_FAILURE
;
1998 case DDI_INFO_DEVT2DEVINFO
:
1999 *result
= fcip_module_dip
;
2004 case DDI_INFO_DEVT2INSTANCE
:
2016 * called from fcip_attach to initialize kstats for the link
2020 fcip_kstat_init(struct fcip
*fptr
)
2024 struct fcipstat
*fcipstatp
;
2026 ASSERT(mutex_owned(&fptr
->fcip_mutex
));
2028 instance
= ddi_get_instance(fptr
->fcip_dip
);
2029 (void) sprintf(buf
, "fcip%d", instance
);
2032 fptr
->fcip_kstatp
= kstat_create("fcip", instance
, buf
, "net",
2034 (sizeof (struct fcipstat
)/ sizeof (kstat_named_t
)),
2035 KSTAT_FLAG_PERSISTENT
);
2037 fptr
->fcip_kstatp
= kstat_create("fcip", instance
, buf
, "net",
2039 (sizeof (struct fcipstat
)/ sizeof (kstat_named_t
)), 0);
2041 if (fptr
->fcip_kstatp
== NULL
) {
2042 FCIP_DEBUG(FCIP_DEBUG_INIT
, (CE_WARN
, "kstat created failed"));
2046 fcipstatp
= (struct fcipstat
*)fptr
->fcip_kstatp
->ks_data
;
2047 kstat_named_init(&fcipstatp
->fcips_ipackets
, "ipackets",
2049 kstat_named_init(&fcipstatp
->fcips_ierrors
, "ierrors",
2051 kstat_named_init(&fcipstatp
->fcips_opackets
, "opackets",
2053 kstat_named_init(&fcipstatp
->fcips_oerrors
, "oerrors",
2055 kstat_named_init(&fcipstatp
->fcips_collisions
, "collisions",
2057 kstat_named_init(&fcipstatp
->fcips_nocanput
, "nocanput",
2059 kstat_named_init(&fcipstatp
->fcips_allocbfail
, "allocbfail",
2062 kstat_named_init(&fcipstatp
->fcips_defer
, "defer",
2064 kstat_named_init(&fcipstatp
->fcips_fram
, "fram",
2066 kstat_named_init(&fcipstatp
->fcips_crc
, "crc",
2068 kstat_named_init(&fcipstatp
->fcips_oflo
, "oflo",
2070 kstat_named_init(&fcipstatp
->fcips_uflo
, "uflo",
2072 kstat_named_init(&fcipstatp
->fcips_missed
, "missed",
2074 kstat_named_init(&fcipstatp
->fcips_tlcol
, "tlcol",
2076 kstat_named_init(&fcipstatp
->fcips_trtry
, "trtry",
2078 kstat_named_init(&fcipstatp
->fcips_tnocar
, "tnocar",
2080 kstat_named_init(&fcipstatp
->fcips_inits
, "inits",
2082 kstat_named_init(&fcipstatp
->fcips_notbufs
, "notbufs",
2084 kstat_named_init(&fcipstatp
->fcips_norbufs
, "norbufs",
2086 kstat_named_init(&fcipstatp
->fcips_allocbfail
, "allocbfail",
2090 * required by kstat for MIB II objects(RFC 1213)
2092 kstat_named_init(&fcipstatp
->fcips_rcvbytes
, "fcips_rcvbytes",
2093 KSTAT_DATA_ULONG
); /* # octets received */
2094 /* MIB - ifInOctets */
2095 kstat_named_init(&fcipstatp
->fcips_xmtbytes
, "fcips_xmtbytes",
2096 KSTAT_DATA_ULONG
); /* # octets xmitted */
2097 /* MIB - ifOutOctets */
2098 kstat_named_init(&fcipstatp
->fcips_multircv
, "fcips_multircv",
2099 KSTAT_DATA_ULONG
); /* # multicast packets */
2100 /* delivered to upper layer */
2101 /* MIB - ifInNUcastPkts */
2102 kstat_named_init(&fcipstatp
->fcips_multixmt
, "fcips_multixmt",
2103 KSTAT_DATA_ULONG
); /* # multicast packets */
2104 /* requested to be sent */
2105 /* MIB - ifOutNUcastPkts */
2106 kstat_named_init(&fcipstatp
->fcips_brdcstrcv
, "fcips_brdcstrcv",
2107 KSTAT_DATA_ULONG
); /* # broadcast packets */
2108 /* delivered to upper layer */
2109 /* MIB - ifInNUcastPkts */
2110 kstat_named_init(&fcipstatp
->fcips_brdcstxmt
, "fcips_brdcstxmt",
2111 KSTAT_DATA_ULONG
); /* # broadcast packets */
2112 /* requested to be sent */
2113 /* MIB - ifOutNUcastPkts */
2114 kstat_named_init(&fcipstatp
->fcips_norcvbuf
, "fcips_norcvbuf",
2115 KSTAT_DATA_ULONG
); /* # rcv packets discarded */
2116 /* MIB - ifInDiscards */
2117 kstat_named_init(&fcipstatp
->fcips_noxmtbuf
, "fcips_noxmtbuf",
2118 KSTAT_DATA_ULONG
); /* # xmt packets discarded */
2120 fptr
->fcip_kstatp
->ks_update
= fcip_stat_update
;
2121 fptr
->fcip_kstatp
->ks_private
= (void *) fptr
;
2122 kstat_install(fptr
->fcip_kstatp
);
2126 * Update the defined kstats for netstat et al to use
2130 fcip_stat_update(kstat_t
*fcip_statp
, int val
)
2132 struct fcipstat
*fcipstatp
;
2135 fptr
= (struct fcip
*)fcip_statp
->ks_private
;
2136 fcipstatp
= (struct fcipstat
*)fcip_statp
->ks_data
;
2138 if (val
== KSTAT_WRITE
) {
2139 fptr
->fcip_ipackets
= fcipstatp
->fcips_ipackets
.value
.ul
;
2140 fptr
->fcip_ierrors
= fcipstatp
->fcips_ierrors
.value
.ul
;
2141 fptr
->fcip_opackets
= fcipstatp
->fcips_opackets
.value
.ul
;
2142 fptr
->fcip_oerrors
= fcipstatp
->fcips_oerrors
.value
.ul
;
2143 fptr
->fcip_collisions
= fcipstatp
->fcips_collisions
.value
.ul
;
2144 fptr
->fcip_defer
= fcipstatp
->fcips_defer
.value
.ul
;
2145 fptr
->fcip_fram
= fcipstatp
->fcips_fram
.value
.ul
;
2146 fptr
->fcip_crc
= fcipstatp
->fcips_crc
.value
.ul
;
2147 fptr
->fcip_oflo
= fcipstatp
->fcips_oflo
.value
.ul
;
2148 fptr
->fcip_uflo
= fcipstatp
->fcips_uflo
.value
.ul
;
2149 fptr
->fcip_missed
= fcipstatp
->fcips_missed
.value
.ul
;
2150 fptr
->fcip_tlcol
= fcipstatp
->fcips_tlcol
.value
.ul
;
2151 fptr
->fcip_trtry
= fcipstatp
->fcips_trtry
.value
.ul
;
2152 fptr
->fcip_tnocar
= fcipstatp
->fcips_tnocar
.value
.ul
;
2153 fptr
->fcip_inits
= fcipstatp
->fcips_inits
.value
.ul
;
2154 fptr
->fcip_notbufs
= fcipstatp
->fcips_notbufs
.value
.ul
;
2155 fptr
->fcip_norbufs
= fcipstatp
->fcips_norbufs
.value
.ul
;
2156 fptr
->fcip_nocanput
= fcipstatp
->fcips_nocanput
.value
.ul
;
2157 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2158 fptr
->fcip_rcvbytes
= fcipstatp
->fcips_rcvbytes
.value
.ul
;
2159 fptr
->fcip_xmtbytes
= fcipstatp
->fcips_xmtbytes
.value
.ul
;
2160 fptr
->fcip_multircv
= fcipstatp
->fcips_multircv
.value
.ul
;
2161 fptr
->fcip_multixmt
= fcipstatp
->fcips_multixmt
.value
.ul
;
2162 fptr
->fcip_brdcstrcv
= fcipstatp
->fcips_brdcstrcv
.value
.ul
;
2163 fptr
->fcip_norcvbuf
= fcipstatp
->fcips_norcvbuf
.value
.ul
;
2164 fptr
->fcip_noxmtbuf
= fcipstatp
->fcips_noxmtbuf
.value
.ul
;
2165 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2166 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2167 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2168 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2169 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2170 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2171 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2172 fptr
->fcip_allocbfail
= fcipstatp
->fcips_allocbfail
.value
.ul
;
2175 fcipstatp
->fcips_ipackets
.value
.ul
= fptr
->fcip_ipackets
;
2176 fcipstatp
->fcips_ierrors
.value
.ul
= fptr
->fcip_ierrors
;
2177 fcipstatp
->fcips_opackets
.value
.ul
= fptr
->fcip_opackets
;
2178 fcipstatp
->fcips_oerrors
.value
.ul
= fptr
->fcip_oerrors
;
2179 fcipstatp
->fcips_collisions
.value
.ul
= fptr
->fcip_collisions
;
2180 fcipstatp
->fcips_nocanput
.value
.ul
= fptr
->fcip_nocanput
;
2181 fcipstatp
->fcips_allocbfail
.value
.ul
= fptr
->fcip_allocbfail
;
2182 fcipstatp
->fcips_defer
.value
.ul
= fptr
->fcip_defer
;
2183 fcipstatp
->fcips_fram
.value
.ul
= fptr
->fcip_fram
;
2184 fcipstatp
->fcips_crc
.value
.ul
= fptr
->fcip_crc
;
2185 fcipstatp
->fcips_oflo
.value
.ul
= fptr
->fcip_oflo
;
2186 fcipstatp
->fcips_uflo
.value
.ul
= fptr
->fcip_uflo
;
2187 fcipstatp
->fcips_missed
.value
.ul
= fptr
->fcip_missed
;
2188 fcipstatp
->fcips_tlcol
.value
.ul
= fptr
->fcip_tlcol
;
2189 fcipstatp
->fcips_trtry
.value
.ul
= fptr
->fcip_trtry
;
2190 fcipstatp
->fcips_tnocar
.value
.ul
= fptr
->fcip_tnocar
;
2191 fcipstatp
->fcips_inits
.value
.ul
= fptr
->fcip_inits
;
2192 fcipstatp
->fcips_norbufs
.value
.ul
= fptr
->fcip_norbufs
;
2193 fcipstatp
->fcips_notbufs
.value
.ul
= fptr
->fcip_notbufs
;
2194 fcipstatp
->fcips_rcvbytes
.value
.ul
= fptr
->fcip_rcvbytes
;
2195 fcipstatp
->fcips_xmtbytes
.value
.ul
= fptr
->fcip_xmtbytes
;
2196 fcipstatp
->fcips_multircv
.value
.ul
= fptr
->fcip_multircv
;
2197 fcipstatp
->fcips_multixmt
.value
.ul
= fptr
->fcip_multixmt
;
2198 fcipstatp
->fcips_brdcstrcv
.value
.ul
= fptr
->fcip_brdcstrcv
;
2199 fcipstatp
->fcips_brdcstxmt
.value
.ul
= fptr
->fcip_brdcstxmt
;
2200 fcipstatp
->fcips_norcvbuf
.value
.ul
= fptr
->fcip_norcvbuf
;
2201 fcipstatp
->fcips_noxmtbuf
.value
.ul
= fptr
->fcip_noxmtbuf
;
2209 * fcip_statec_cb: handles all required state change callback notifications
2210 * it receives from the transport
2214 fcip_statec_cb(opaque_t ulp_handle
, opaque_t phandle
,
2215 uint32_t port_state
, uint32_t port_top
, fc_portmap_t changelist
[],
2216 uint32_t listlen
, uint32_t sid
)
2218 fcip_port_info_t
*fport
;
2220 struct fcipstr
*slp
;
2224 struct fcip_routing_table
*frtp
;
2226 fport
= fcip_get_port(phandle
);
2228 if (fport
== NULL
) {
2232 fptr
= fport
->fcipp_fcip
;
2233 ASSERT(fptr
!= NULL
);
2239 instance
= ddi_get_instance(fport
->fcipp_dip
);
2242 FCIP_DEBUG(FCIP_DEBUG_ELS
,
2243 (CE_NOTE
, "fcip%d, state change callback: state:0x%x, "
2244 "S_ID:0x%x, count:0x%x", instance
, port_state
, sid
, listlen
));
2246 mutex_enter(&fptr
->fcip_mutex
);
2248 if ((fptr
->fcip_flags
& (FCIP_DETACHING
| FCIP_DETACHED
)) ||
2249 (fptr
->fcip_flags
& (FCIP_SUSPENDED
| FCIP_POWER_DOWN
))) {
2250 mutex_exit(&fptr
->fcip_mutex
);
2255 * set fcip flags to indicate we are in the middle of a
2256 * state change callback so we can wait till the statechange
2257 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2259 fptr
->fcip_flags
|= FCIP_IN_SC_CB
;
2261 fport
->fcipp_pstate
= port_state
;
2264 * Check if topology changed. If Yes - Modify the broadcast
2265 * RTE entries to understand the new broadcast D_IDs
2267 if (fport
->fcipp_topology
!= port_top
&&
2268 (port_top
!= FC_TOP_UNKNOWN
)) {
2270 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_NOTE
,
2271 "topology changed: Old topology: 0x%x New topology 0x%x",
2272 fport
->fcipp_topology
, port_top
));
2274 * If topology changed - attempt a rediscovery of
2275 * devices. Helps specially in Fabric/Public loops
2276 * and if on_demand_node_creation is disabled
2278 fport
->fcipp_topology
= port_top
;
2279 fcip_handle_topology(fptr
);
2282 mutex_exit(&fptr
->fcip_mutex
);
2284 switch (FC_PORT_STATE_MASK(port_state
)) {
2285 case FC_STATE_ONLINE
:
2289 case FC_STATE_LIP_LBIT_SET
:
2292 * nothing to do here actually other than if we
2293 * were actually logged onto a port in the devlist
2294 * (which indicates active communication between
2295 * the host port and the port in the changelist).
2296 * If however we are in a private loop or point to
2297 * point mode, we need to check for any IP capable
2298 * ports and update our routing table.
2303 * This indicates a fabric port with a NameServer.
2304 * Check the devlist to see if we are in active
2305 * communication with a port on the devlist.
2307 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_NOTE
,
2308 "Statec_cb: fabric topology"));
2309 fcip_rt_update(fptr
, changelist
, listlen
);
2313 * No nameserver - so treat it like a Private loop
2314 * or point to point topology and get a map of
2315 * devices on the link and get IP capable ports to
2316 * to update the routing table.
2318 FCIP_DEBUG(FCIP_DEBUG_ELS
,
2319 (CE_NOTE
, "Statec_cb: NO_NS topology"));
2321 case FC_TOP_PRIVATE_LOOP
:
2322 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_NOTE
,
2323 "Statec_cb: Pvt_Loop topology"));
2327 * call get_port_map() and update routing table
2329 fcip_rt_update(fptr
, changelist
, listlen
);
2332 FCIP_DEBUG(FCIP_DEBUG_ELS
,
2333 (CE_NOTE
, "Statec_cb: Unknown topology"));
2337 * We should now enable the Queues and permit I/Os
2338 * to flow through downstream. The update of routing
2339 * table should have flushed out any port entries that
2340 * don't exist or are not available after the state change
2342 mutex_enter(&fptr
->fcip_mutex
);
2343 fptr
->fcip_port_state
= FCIP_PORT_ONLINE
;
2344 if (fptr
->fcip_flags
& FCIP_LINK_DOWN
) {
2345 fptr
->fcip_flags
&= ~FCIP_LINK_DOWN
;
2347 mutex_exit(&fptr
->fcip_mutex
);
2350 * Enable write queues
2352 rw_enter(&fcipstruplock
, RW_READER
);
2353 for (slp
= fcipstrup
; slp
!= NULL
; slp
= slp
->sl_nextp
) {
2354 if (slp
&& slp
->sl_fcip
== fptr
) {
2355 wrq
= WR(slp
->sl_rq
);
2356 if (wrq
->q_flag
& QFULL
) {
2361 rw_exit(&fcipstruplock
);
2363 case FC_STATE_OFFLINE
:
2365 * mark the port_state OFFLINE and wait for it to
2366 * become online. Any new messages in this state will
2367 * simply be queued back up. If the port does not
2368 * come online in a short while, we can begin failing
2369 * messages and flush the routing table
2371 mutex_enter(&fptr
->fcip_mutex
);
2372 fptr
->fcip_mark_offline
= fptr
->fcip_timeout_ticks
+
2373 FCIP_OFFLINE_TIMEOUT
;
2374 fptr
->fcip_port_state
= FCIP_PORT_OFFLINE
;
2375 mutex_exit(&fptr
->fcip_mutex
);
2378 * Mark all Routing table entries as invalid to prevent
2379 * any commands from trickling through to ports that
2380 * have disappeared from under us
2382 mutex_enter(&fptr
->fcip_rt_mutex
);
2383 for (index
= 0; index
< FCIP_RT_HASH_ELEMS
; index
++) {
2384 frtp
= fptr
->fcip_rtable
[index
];
2386 frtp
->fcipr_state
= PORT_DEVICE_INVALID
;
2387 frtp
= frtp
->fcipr_next
;
2390 mutex_exit(&fptr
->fcip_rt_mutex
);
2394 case FC_STATE_RESET_REQUESTED
:
2396 * Release all Unsolicited buffers back to transport/FCA.
2397 * This also means the port state is marked offline - so
2398 * we may have to do what OFFLINE state requires us to do.
2399 * Care must be taken to wait for any active unsolicited
2400 * buffer with the other Streams modules - so wait for
2401 * a freeb if the unsolicited buffer is passed back all
2404 mutex_enter(&fptr
->fcip_mutex
);
2406 #ifdef FCIP_ESBALLOC
2407 while (fptr
->fcip_ub_upstream
) {
2408 cv_wait(&fptr
->fcip_ub_cv
, &fptr
->fcip_mutex
);
2410 #endif /* FCIP_ESBALLOC */
2412 fptr
->fcip_mark_offline
= fptr
->fcip_timeout_ticks
+
2413 FCIP_OFFLINE_TIMEOUT
;
2414 fptr
->fcip_port_state
= FCIP_PORT_OFFLINE
;
2415 mutex_exit(&fptr
->fcip_mutex
);
2418 case FC_STATE_DEVICE_CHANGE
:
2420 fcip_rt_update(fptr
, changelist
, listlen
);
2423 case FC_STATE_RESET
:
2425 * Not much to do I guess - wait for port to become
2426 * ONLINE. If the port doesn't become online in a short
2427 * while, the upper layers abort any request themselves.
2428 * We can just putback the messages in the streams queues
2429 * if the link is offline
2433 mutex_enter(&fptr
->fcip_mutex
);
2434 fptr
->fcip_flags
&= ~(FCIP_IN_SC_CB
);
2435 mutex_exit(&fptr
->fcip_mutex
);
2439 * Given a port handle, return the fcip_port_info structure corresponding
2440 * to that port handle. The transport allocates and communicates with
2441 * ULPs using port handles
2443 static fcip_port_info_t
*
2444 fcip_get_port(opaque_t phandle
)
2446 fcip_port_info_t
*fport
;
2448 ASSERT(phandle
!= NULL
);
2450 mutex_enter(&fcip_global_mutex
);
2451 fport
= fcip_port_head
;
2453 while (fport
!= NULL
) {
2454 if (fport
->fcipp_handle
== phandle
) {
2458 fport
= fport
->fcipp_next
;
2461 mutex_exit(&fcip_global_mutex
);
2467 * Handle inbound ELS requests received by the transport. We are only
2468 * intereseted in FARP/InARP mostly.
2472 fcip_els_cb(opaque_t ulp_handle
, opaque_t phandle
,
2473 fc_unsol_buf_t
*buf
, uint32_t claimed
)
2475 fcip_port_info_t
*fport
;
2480 la_els_farp_t farp_cmd
;
2481 la_els_farp_t
*fcmd
;
2482 int rval
= FC_UNCLAIMED
;
2484 fport
= fcip_get_port(phandle
);
2485 if (fport
== NULL
) {
2486 return (FC_UNCLAIMED
);
2489 fptr
= fport
->fcipp_fcip
;
2490 ASSERT(fptr
!= NULL
);
2492 return (FC_UNCLAIMED
);
2495 instance
= ddi_get_instance(fport
->fcipp_dip
);
2497 mutex_enter(&fptr
->fcip_mutex
);
2498 if ((fptr
->fcip_flags
& (FCIP_DETACHING
| FCIP_DETACHED
)) ||
2499 (fptr
->fcip_flags
& (FCIP_SUSPENDED
| FCIP_POWER_DOWN
))) {
2500 mutex_exit(&fptr
->fcip_mutex
);
2501 return (FC_UNCLAIMED
);
2505 * set fcip flags to indicate we are in the middle of a
2506 * ELS callback so we can wait till the statechange
2507 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2509 fptr
->fcip_flags
|= FCIP_IN_ELS_CB
;
2510 mutex_exit(&fptr
->fcip_mutex
);
2512 FCIP_DEBUG(FCIP_DEBUG_ELS
,
2513 (CE_NOTE
, "fcip%d, ELS callback , ", instance
));
2515 r_ctl
= buf
->ub_frame
.r_ctl
;
2516 switch (r_ctl
& R_CTL_ROUTING
) {
2517 case R_CTL_EXTENDED_SVC
:
2518 if (r_ctl
== R_CTL_ELS_REQ
) {
2519 ls_code
= buf
->ub_buffer
[0];
2520 if (ls_code
== LA_ELS_FARP_REQ
) {
2522 * Inbound FARP broadcast request
2524 if (buf
->ub_bufsize
!= sizeof (la_els_farp_t
)) {
2525 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_WARN
,
2526 "Invalid FARP req buffer size "
2527 "expected 0x%lx, got 0x%x",
2528 (long)(sizeof (la_els_farp_t
)),
2530 rval
= FC_UNCLAIMED
;
2533 fcmd
= (la_els_farp_t
*)buf
;
2534 if (fcip_wwn_compare(&fcmd
->resp_nwwn
,
2536 FCIP_COMPARE_NWWN
) != 0) {
2537 rval
= FC_UNCLAIMED
;
2541 * copy the FARP request and release the
2542 * unsolicited buffer
2545 bcopy((void *)buf
, (void *)fcmd
,
2546 sizeof (la_els_farp_t
));
2547 (void) fc_ulp_ubrelease(fport
->fcipp_handle
, 1,
2550 if (fcip_farp_supported
&&
2551 fcip_handle_farp_request(fptr
, fcmd
) ==
2554 * We successfully sent out a FARP
2555 * reply to the requesting port
2560 rval
= FC_UNCLAIMED
;
2564 } else if (r_ctl
== R_CTL_ELS_RSP
) {
2565 ls_code
= buf
->ub_buffer
[0];
2566 if (ls_code
== LA_ELS_FARP_REPLY
) {
2568 * We received a REPLY to our FARP request
2570 if (buf
->ub_bufsize
!= sizeof (la_els_farp_t
)) {
2571 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_WARN
,
2572 "Invalid FARP req buffer size "
2573 "expected 0x%lx, got 0x%x",
2574 (long)(sizeof (la_els_farp_t
)),
2576 rval
= FC_UNCLAIMED
;
2580 bcopy((void *)buf
, (void *)fcmd
,
2581 sizeof (la_els_farp_t
));
2582 (void) fc_ulp_ubrelease(fport
->fcipp_handle
, 1,
2584 if (fcip_farp_supported
&&
2585 fcip_handle_farp_response(fptr
, fcmd
) ==
2587 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_NOTE
,
2588 "Successfully recevied a FARP "
2590 mutex_enter(&fptr
->fcip_mutex
);
2591 fptr
->fcip_farp_rsp_flag
= 1;
2592 cv_signal(&fptr
->fcip_farp_cv
);
2593 mutex_exit(&fptr
->fcip_mutex
);
2597 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_WARN
,
2598 "Unable to handle a FARP response "
2600 rval
= FC_UNCLAIMED
;
2610 mutex_enter(&fptr
->fcip_mutex
);
2611 fptr
->fcip_flags
&= ~(FCIP_IN_ELS_CB
);
2612 mutex_exit(&fptr
->fcip_mutex
);
2618 * Handle inbound FARP requests
2621 fcip_handle_farp_request(struct fcip
*fptr
, la_els_farp_t
*fcmd
)
2623 fcip_pkt_t
*fcip_pkt
;
2624 fc_packet_t
*fc_pkt
;
2625 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
2626 int rval
= FC_FAILURE
;
2629 struct fcip_routing_table
*frp
;
2630 struct fcip_dest
*fdestp
;
2633 * Add an entry for the remote port into our routing and destination
2636 map
.map_did
= fcmd
->req_id
;
2637 map
.map_hard_addr
.hard_addr
= fcmd
->req_id
.port_id
;
2638 map
.map_state
= PORT_DEVICE_VALID
;
2639 map
.map_type
= PORT_DEVICE_NEW
;
2642 bcopy((void *)&fcmd
->req_pwwn
, (void *)&map
.map_pwwn
,
2644 bcopy((void *)&fcmd
->req_nwwn
, (void *)&map
.map_nwwn
,
2646 fcip_rt_update(fptr
, &map
, 1);
2647 mutex_enter(&fptr
->fcip_rt_mutex
);
2648 frp
= fcip_lookup_rtable(fptr
, &fcmd
->req_pwwn
, FCIP_COMPARE_NWWN
);
2649 mutex_exit(&fptr
->fcip_rt_mutex
);
2651 fdestp
= fcip_add_dest(fptr
, frp
);
2653 fcip_pkt
= fcip_ipkt_alloc(fptr
, sizeof (la_els_farp_t
),
2654 sizeof (la_els_farp_t
), NULL
, KM_SLEEP
);
2655 if (fcip_pkt
== NULL
) {
2660 * Fill in our port's PWWN and NWWN
2662 fcmd
->resp_pwwn
= fport
->fcipp_pwwn
;
2663 fcmd
->resp_nwwn
= fport
->fcipp_nwwn
;
2665 fcip_init_unicast_pkt(fcip_pkt
, fport
->fcipp_sid
,
2666 fcmd
->req_id
, NULL
);
2669 fc_ulp_get_fca_device(fport
->fcipp_handle
, fcmd
->req_id
);
2670 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
2671 fc_pkt
->pkt_cmd_fhdr
.r_ctl
= R_CTL_ELS_RSP
;
2672 fc_pkt
->pkt_fca_device
= fca_dev
;
2673 fcip_pkt
->fcip_pkt_dest
= fdestp
;
2676 * Attempt a PLOGI again
2678 if (fcmd
->resp_flags
& FARP_INIT_P_LOGI
) {
2679 if (fcip_do_plogi(fptr
, frp
) != FC_SUCCESS
) {
2681 * Login to the remote port failed. There is no
2682 * point continuing with the FARP request further
2685 frp
->fcipr_state
= PORT_DEVICE_INVALID
;
2691 FCIP_CP_OUT(fcmd
, fc_pkt
->pkt_cmd
, fc_pkt
->pkt_cmd_acc
,
2692 sizeof (la_els_farp_t
));
2694 rval
= fc_ulp_issue_els(fport
->fcipp_handle
, fc_pkt
);
2695 if (rval
!= FC_SUCCESS
) {
2696 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_WARN
,
2697 "fcip_transport of farp reply failed 0x%x", rval
));
2706 * Handle FARP responses to our FARP requests. When we receive a FARP
2707 * reply, we need to add the entry for the Port that replied into our
2708 * routing and destination hash tables. It is possible that the remote
2709 * port did not login into us (FARP responses can be received without
2713 fcip_handle_farp_response(struct fcip
*fptr
, la_els_farp_t
*fcmd
)
2715 int rval
= FC_FAILURE
;
2717 struct fcip_routing_table
*frp
;
2718 struct fcip_dest
*fdestp
;
2721 * Add an entry for the remote port into our routing and destination
2724 map
.map_did
= fcmd
->dest_id
;
2725 map
.map_hard_addr
.hard_addr
= fcmd
->dest_id
.port_id
;
2726 map
.map_state
= PORT_DEVICE_VALID
;
2727 map
.map_type
= PORT_DEVICE_NEW
;
2730 bcopy((void *)&fcmd
->resp_pwwn
, (void *)&map
.map_pwwn
,
2732 bcopy((void *)&fcmd
->resp_nwwn
, (void *)&map
.map_nwwn
,
2734 fcip_rt_update(fptr
, &map
, 1);
2735 mutex_enter(&fptr
->fcip_rt_mutex
);
2736 frp
= fcip_lookup_rtable(fptr
, &fcmd
->resp_pwwn
, FCIP_COMPARE_NWWN
);
2737 mutex_exit(&fptr
->fcip_rt_mutex
);
2739 fdestp
= fcip_add_dest(fptr
, frp
);
2741 if (fdestp
!= NULL
) {
/*
 * Combined length of the FC network header, the LLC/SNAP header and the
 * IP header that precede the IP payload in an inbound unsolicited buffer.
 * The expansion is fully parenthesized so the macro is safe inside larger
 * arithmetic expressions (the original unparenthesized sum could bind
 * incorrectly next to higher-precedence operators).
 */
#define	FCIP_HDRS_LENGTH	\
	(sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t) + \
	sizeof (ipha_t))
2752 * fcip_data_cb is the heart of most IP operations. This routine is called
2753 * by the transport when any unsolicited IP data arrives at a port (which
2754 * is almost all IP data). This routine then strips off the Network header
2755 * from the payload (after authenticating the received payload ofcourse),
2756 * creates a message blk and sends the data upstream. You will see ugly
2757 * #defines because of problems with using esballoc() as opposed to
2758 * allocb to prevent an extra copy of data. We should probably move to
2759 * esballoc entirely when the MTU eventually will be larger than 1500 bytes
2760 * since copies will get more expensive then. At 1500 byte MTUs, there is
2761 * no noticable difference between using allocb and esballoc. The other
2762 * caveat is that the qlc firmware still cannot tell us accurately the
2763 * no. of valid bytes in the unsol buffer it DMA'ed so we have to resort
2764 * to looking into the IP header and hoping that the no. of bytes speficified
2765 * in the header was actually received.
2769 fcip_data_cb(opaque_t ulp_handle
, opaque_t phandle
,
2770 fc_unsol_buf_t
*buf
, uint32_t claimed
)
2772 fcip_port_info_t
*fport
;
2774 fcph_network_hdr_t
*nhdr
;
2775 llc_snap_hdr_t
*snaphdr
;
2783 #ifdef FCIP_ESBALLOC
2785 struct fcip_esballoc_arg
*fesb_argp
;
2786 #endif /* FCIP_ESBALLOC */
2788 fport
= fcip_get_port(phandle
);
2789 if (fport
== NULL
) {
2790 return (FC_UNCLAIMED
);
2793 fptr
= fport
->fcipp_fcip
;
2794 ASSERT(fptr
!= NULL
);
2797 return (FC_UNCLAIMED
);
2800 mutex_enter(&fptr
->fcip_mutex
);
2801 if ((fptr
->fcip_flags
& (FCIP_DETACHING
| FCIP_DETACHED
)) ||
2802 (fptr
->fcip_flags
& (FCIP_SUSPENDED
| FCIP_POWER_DOWN
))) {
2803 mutex_exit(&fptr
->fcip_mutex
);
2804 rval
= FC_UNCLAIMED
;
2809 * set fcip flags to indicate we are in the middle of a
2810 * data callback so we can wait till the statechange
2811 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2813 fptr
->fcip_flags
|= FCIP_IN_DATA_CB
;
2814 mutex_exit(&fptr
->fcip_mutex
);
2817 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2818 (CE_NOTE
, "fcip%d, data callback",
2819 ddi_get_instance(fport
->fcipp_dip
)));
2822 * get to the network and snap headers in the payload
2824 nhdr
= (fcph_network_hdr_t
*)buf
->ub_buffer
;
2825 snaphdr
= (llc_snap_hdr_t
*)(buf
->ub_buffer
+
2826 sizeof (fcph_network_hdr_t
));
2828 hdrlen
= sizeof (fcph_network_hdr_t
) + sizeof (llc_snap_hdr_t
);
2831 * get the IP header to obtain the no. of bytes we need to read
2832 * off from the unsol buffer. This obviously is because not all
2833 * data fills up the unsol buffer completely and the firmware
2834 * doesn't tell us how many valid bytes are in there as well
2836 iphdr
= (ipha_t
*)(buf
->ub_buffer
+ hdrlen
);
2837 snaphdr
->pid
= BE_16(snaphdr
->pid
);
2838 type
= snaphdr
->pid
;
2840 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2841 (CE_CONT
, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
2842 snaphdr
->dsap
, snaphdr
->ssap
, snaphdr
->ctrl
));
2844 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2845 (CE_CONT
, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
2846 snaphdr
->oui
[0], snaphdr
->oui
[1], snaphdr
->oui
[2], snaphdr
->pid
));
2848 /* Authneticate, Authenticate */
2849 if (type
== ETHERTYPE_IP
) {
2850 len
= hdrlen
+ BE_16(iphdr
->ipha_length
);
2851 } else if (type
== ETHERTYPE_ARP
) {
2854 len
= buf
->ub_bufsize
;
2857 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2858 (CE_CONT
, "effective packet length is %d bytes.\n", len
));
2860 if (len
< hdrlen
|| len
> FCIP_UB_SIZE
) {
2861 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2862 (CE_NOTE
, "Incorrect buffer size %d bytes", len
));
2863 rval
= FC_UNCLAIMED
;
2867 if (buf
->ub_frame
.type
!= FC_TYPE_IS8802_SNAP
) {
2868 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
, (CE_NOTE
, "Not IP/ARP data"));
2869 rval
= FC_UNCLAIMED
;
2873 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
, (CE_NOTE
, "checking wwn"));
2875 if ((fcip_wwn_compare(&nhdr
->net_dest_addr
, &fport
->fcipp_pwwn
,
2876 FCIP_COMPARE_NWWN
) != 0) &&
2877 (!IS_BROADCAST_ADDR(&nhdr
->net_dest_addr
))) {
2878 rval
= FC_UNCLAIMED
;
2880 } else if (fcip_cache_on_arp_broadcast
&&
2881 IS_BROADCAST_ADDR(&nhdr
->net_dest_addr
)) {
2882 fcip_cache_arp_broadcast(fptr
, buf
);
2885 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
, (CE_NOTE
, "Allocate streams block"));
2888 * Using esballoc instead of allocb should be faster, atleast at
2889 * larger MTUs than 1500 bytes. Someday we'll get there :)
2891 #if defined(FCIP_ESBALLOC)
2893 * allocate memory for the frtn function arg. The Function
2894 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
2895 * which contains pointers to the unsol buffer and the
2896 * opaque port handle for releasing the unsol buffer back to
2899 fesb_argp
= (struct fcip_esballoc_arg
*)
2900 kmem_zalloc(sizeof (struct fcip_esballoc_arg
), KM_NOSLEEP
);
2902 if (fesb_argp
== NULL
) {
2903 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2904 (CE_WARN
, "esballoc of mblk failed in data_cb"));
2905 rval
= FC_UNCLAIMED
;
2909 * Check with KM_NOSLEEP
2911 free_ubuf
= (frtn_t
*)kmem_zalloc(sizeof (frtn_t
), KM_NOSLEEP
);
2912 if (free_ubuf
== NULL
) {
2913 kmem_free(fesb_argp
, sizeof (struct fcip_esballoc_arg
));
2914 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2915 (CE_WARN
, "esballoc of mblk failed in data_cb"));
2916 rval
= FC_UNCLAIMED
;
2920 fesb_argp
->frtnp
= free_ubuf
;
2921 fesb_argp
->buf
= buf
;
2922 fesb_argp
->phandle
= phandle
;
2923 free_ubuf
->free_func
= fcip_ubfree
;
2924 free_ubuf
->free_arg
= (char *)fesb_argp
;
2925 if ((bp
= (mblk_t
*)esballoc((unsigned char *)buf
->ub_buffer
,
2926 len
, BPRI_MED
, free_ubuf
)) == NULL
) {
2927 kmem_free(fesb_argp
, sizeof (struct fcip_esballoc_arg
));
2928 kmem_free(free_ubuf
, sizeof (frtn_t
));
2929 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2930 (CE_WARN
, "esballoc of mblk failed in data_cb"));
2931 rval
= FC_UNCLAIMED
;
2934 #elif !defined(FCIP_ESBALLOC)
2936 * allocate streams mblk and copy the contents of the
2937 * unsolicited buffer into this newly alloc'ed mblk
2939 if ((bp
= (mblk_t
*)fcip_allocb((size_t)len
, BPRI_LO
)) == NULL
) {
2940 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
2941 (CE_WARN
, "alloc of mblk failed in data_cb"));
2942 rval
= FC_UNCLAIMED
;
2947 * Unsolicited buffers handed up to us from the FCA must be
2948 * endian clean so just bcopy the data into our mblk. Else
2949 * we may have to either copy the data byte by byte or
2950 * use the ddi_rep_get* routines to do the copy for us.
2952 bcopy(buf
->ub_buffer
, bp
->b_rptr
, len
);
2955 * for esballoc'ed mblks - free the UB in the frtn function
2956 * along with the memory allocated for the function arg.
2957 * for allocb'ed mblk - release the unsolicited buffer here
2959 (void) fc_ulp_ubrelease(phandle
, 1, &buf
->ub_token
);
2961 #endif /* FCIP_ESBALLOC */
2963 bp
->b_wptr
= bp
->b_rptr
+ len
;
2964 fptr
->fcip_ipackets
++;
2966 if (type
== ETHERTYPE_IP
) {
2967 mutex_enter(&fptr
->fcip_mutex
);
2968 fptr
->fcip_ub_upstream
++;
2969 mutex_exit(&fptr
->fcip_mutex
);
2970 bp
->b_rptr
+= hdrlen
;
2973 * Check if ipq is valid in the sendup thread
2975 if (fcip_sendup_alloc_enque(fptr
, bp
, NULL
) != FC_SUCCESS
) {
2980 * We won't get ethernet 802.3 packets in FCIP but we may get
2981 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
2982 * fcip_sendup() do the matching.
2984 mutex_enter(&fptr
->fcip_mutex
);
2985 fptr
->fcip_ub_upstream
++;
2986 mutex_exit(&fptr
->fcip_mutex
);
2987 if (fcip_sendup_alloc_enque(fptr
, bp
,
2988 fcip_accept
) != FC_SUCCESS
) {
2996 * Unset fcip_flags to indicate we are out of callback and return
2999 mutex_enter(&fptr
->fcip_mutex
);
3000 fptr
->fcip_flags
&= ~(FCIP_IN_DATA_CB
);
3001 mutex_exit(&fptr
->fcip_mutex
);
3005 #if !defined(FCIP_ESBALLOC)
3007 * Allocate a message block for the inbound data to be sent upstream.
3010 fcip_allocb(size_t size
, uint_t pri
)
3014 if ((mp
= allocb(size
, pri
)) == NULL
) {
3023 * This helper routine kmem cache alloc's a sendup element for enquing
3024 * into the sendup list for callbacks upstream from the dedicated sendup
3025 * thread. We enque the msg buf into the sendup list and cv_signal the
3026 * sendup thread to finish the callback for us.
3029 fcip_sendup_alloc_enque(struct fcip
*fptr
, mblk_t
*mp
, struct fcipstr
*(*f
)())
3031 struct fcip_sendup_elem
*msg_elem
;
3032 int rval
= FC_FAILURE
;
3034 msg_elem
= kmem_cache_alloc(fptr
->fcip_sendup_cache
, KM_NOSLEEP
);
3035 if (msg_elem
== NULL
) {
3036 /* drop pkt to floor - update stats */
3038 goto sendup_alloc_done
;
3040 msg_elem
->fcipsu_mp
= mp
;
3041 msg_elem
->fcipsu_func
= f
;
3043 mutex_enter(&fptr
->fcip_sendup_mutex
);
3044 if (fptr
->fcip_sendup_head
== NULL
) {
3045 fptr
->fcip_sendup_head
= fptr
->fcip_sendup_tail
= msg_elem
;
3047 fptr
->fcip_sendup_tail
->fcipsu_next
= msg_elem
;
3048 fptr
->fcip_sendup_tail
= msg_elem
;
3050 fptr
->fcip_sendup_cnt
++;
3051 cv_signal(&fptr
->fcip_sendup_cv
);
3052 mutex_exit(&fptr
->fcip_sendup_mutex
);
3060 * One of the ways of performing the WWN to D_ID mapping required for
3061 * IPFC data is to cache the unsolicited ARP broadcast messages received
3062 * and update the routing table to add entry for the destination port
3063 * if we are the intended recipient of the ARP broadcast message. This is
3064 * one of the methods recommended in the rfc to obtain the WWN to D_ID mapping
3065 * but is not typically used unless enabled. The driver prefers to use the
3066 * nameserver/lilp map to obtain this mapping.
3069 fcip_cache_arp_broadcast(struct fcip
*fptr
, fc_unsol_buf_t
*buf
)
3071 fcip_port_info_t
*fport
;
3072 fcph_network_hdr_t
*nhdr
;
3073 struct fcip_routing_table
*frp
;
3076 fport
= fptr
->fcip_port_info
;
3077 if (fport
== NULL
) {
3080 ASSERT(fport
!= NULL
);
3082 nhdr
= (fcph_network_hdr_t
*)buf
->ub_buffer
;
3084 mutex_enter(&fptr
->fcip_rt_mutex
);
3085 frp
= fcip_lookup_rtable(fptr
, &nhdr
->net_src_addr
, FCIP_COMPARE_NWWN
);
3086 mutex_exit(&fptr
->fcip_rt_mutex
);
3088 map
.map_did
.port_id
= buf
->ub_frame
.s_id
;
3089 map
.map_hard_addr
.hard_addr
= buf
->ub_frame
.s_id
;
3090 map
.map_state
= PORT_DEVICE_VALID
;
3091 map
.map_type
= PORT_DEVICE_NEW
;
3094 bcopy((void *)&nhdr
->net_src_addr
, (void *)&map
.map_pwwn
,
3096 bcopy((void *)&nhdr
->net_src_addr
, (void *)&map
.map_nwwn
,
3098 fcip_rt_update(fptr
, &map
, 1);
3099 mutex_enter(&fptr
->fcip_rt_mutex
);
3100 frp
= fcip_lookup_rtable(fptr
, &nhdr
->net_src_addr
,
3102 mutex_exit(&fptr
->fcip_rt_mutex
);
3104 (void) fcip_add_dest(fptr
, frp
);
3110 * This is a dedicated thread to do callbacks from fcip's data callback
3111 * routines into the modules upstream. The reason for this thread is
3112 * the data callback function can be called from an interrupt context and
3113 * the upstream modules *can* make calls downstream in the same thread
3114 * context. If the call is to a fabric port which is not yet in our
3115 * routing tables, we may have to query the nameserver/fabric for the
3116 * MAC addr to Port_ID mapping which may be blocking calls.
3119 fcip_sendup_thr(void *arg
)
3121 struct fcip
*fptr
= (struct fcip
*)arg
;
3122 struct fcip_sendup_elem
*msg_elem
;
3123 queue_t
*ip4q
= NULL
;
3125 CALLB_CPR_INIT(&fptr
->fcip_cpr_info
, &fptr
->fcip_sendup_mutex
,
3126 callb_generic_cpr
, "fcip_sendup_thr");
3128 mutex_enter(&fptr
->fcip_sendup_mutex
);
3131 while (fptr
->fcip_sendup_thr_initted
&&
3132 fptr
->fcip_sendup_head
== NULL
) {
3133 CALLB_CPR_SAFE_BEGIN(&fptr
->fcip_cpr_info
);
3134 cv_wait(&fptr
->fcip_sendup_cv
,
3135 &fptr
->fcip_sendup_mutex
);
3136 CALLB_CPR_SAFE_END(&fptr
->fcip_cpr_info
,
3137 &fptr
->fcip_sendup_mutex
);
3140 if (fptr
->fcip_sendup_thr_initted
== 0) {
3144 msg_elem
= fptr
->fcip_sendup_head
;
3145 fptr
->fcip_sendup_head
= msg_elem
->fcipsu_next
;
3146 msg_elem
->fcipsu_next
= NULL
;
3147 mutex_exit(&fptr
->fcip_sendup_mutex
);
3149 if (msg_elem
->fcipsu_func
== NULL
) {
3151 * Message for ipq. Check to see if the ipq is
3152 * is still valid. Since the thread is asynchronous,
3153 * there could have been a close on the stream
3155 mutex_enter(&fptr
->fcip_mutex
);
3156 if (fptr
->fcip_ipq
&& canputnext(fptr
->fcip_ipq
)) {
3157 ip4q
= fptr
->fcip_ipq
;
3158 mutex_exit(&fptr
->fcip_mutex
);
3159 putnext(ip4q
, msg_elem
->fcipsu_mp
);
3161 mutex_exit(&fptr
->fcip_mutex
);
3162 freemsg(msg_elem
->fcipsu_mp
);
3165 fcip_sendup(fptr
, msg_elem
->fcipsu_mp
,
3166 msg_elem
->fcipsu_func
);
3169 #if !defined(FCIP_ESBALLOC)
3171 * for allocb'ed mblk - decrement upstream count here
3173 mutex_enter(&fptr
->fcip_mutex
);
3174 ASSERT(fptr
->fcip_ub_upstream
> 0);
3175 fptr
->fcip_ub_upstream
--;
3176 mutex_exit(&fptr
->fcip_mutex
);
3177 #endif /* FCIP_ESBALLOC */
3179 kmem_cache_free(fptr
->fcip_sendup_cache
, (void *)msg_elem
);
3180 mutex_enter(&fptr
->fcip_sendup_mutex
);
3181 fptr
->fcip_sendup_cnt
--;
3186 CALLB_CPR_EXIT(&fptr
->fcip_cpr_info
);
3188 mutex_exit(&fptr
->fcip_sendup_mutex
);
3189 #endif /* __lock_lint */
3191 /* Wake up fcip detach thread by the end */
3192 cv_signal(&fptr
->fcip_sendup_cv
);
3197 #ifdef FCIP_ESBALLOC
3200 * called from the stream head when it is done using an unsolicited buffer.
3201 * We release this buffer then to the FCA for reuse.
3204 fcip_ubfree(char *arg
)
3206 struct fcip_esballoc_arg
*fesb_argp
= (struct fcip_esballoc_arg
*)arg
;
3207 fc_unsol_buf_t
*ubuf
;
3209 fcip_port_info_t
*fport
;
3213 fport
= fcip_get_port(fesb_argp
->phandle
);
3214 fptr
= fport
->fcipp_fcip
;
3216 ASSERT(fesb_argp
!= NULL
);
3217 ubuf
= fesb_argp
->buf
;
3218 frtnp
= fesb_argp
->frtnp
;
3221 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
,
3222 (CE_WARN
, "freeing ubuf after esballoc in fcip_ubfree"));
3223 (void) fc_ulp_ubrelease(fesb_argp
->phandle
, 1, &ubuf
->ub_token
);
3225 mutex_enter(&fptr
->fcip_mutex
);
3226 ASSERT(fptr
->fcip_ub_upstream
> 0);
3227 fptr
->fcip_ub_upstream
--;
3228 cv_signal(&fptr
->fcip_ub_cv
);
3229 mutex_exit(&fptr
->fcip_mutex
);
3231 kmem_free(frtnp
, sizeof (frtn_t
));
3232 kmem_free(fesb_argp
, sizeof (struct fcip_esballoc_arg
));
3235 #endif /* FCIP_ESBALLOC */
3238 * handle data other than that of type ETHERTYPE_IP and send it on its
3239 * way upstream to the right streams module to handle
3242 fcip_sendup(struct fcip
*fptr
, mblk_t
*mp
, struct fcipstr
*(*acceptfunc
)())
3244 struct fcipstr
*slp
, *nslp
;
3247 uint32_t isgroupaddr
;
3250 fcph_network_hdr_t
*nhdr
;
3251 llc_snap_hdr_t
*snaphdr
;
3253 nhdr
= (fcph_network_hdr_t
*)mp
->b_rptr
;
3255 (llc_snap_hdr_t
*)(mp
->b_rptr
+ sizeof (fcph_network_hdr_t
));
3256 dhostp
= &nhdr
->net_dest_addr
;
3257 type
= snaphdr
->pid
;
3258 hdrlen
= sizeof (fcph_network_hdr_t
) + sizeof (llc_snap_hdr_t
);
3260 /* No group address with fibre channel */
3264 * While holding a reader lock on the linked list of streams structures,
3265 * attempt to match the address criteria for each stream
3266 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
3269 rw_enter(&fcipstruplock
, RW_READER
);
3271 if ((slp
= (*acceptfunc
)(fcipstrup
, fptr
, type
, dhostp
)) == NULL
) {
3272 rw_exit(&fcipstruplock
);
3278 * Loop on matching open streams until (*acceptfunc)() returns NULL.
3280 for (; nslp
= (*acceptfunc
)(slp
->sl_nextp
, fptr
, type
, dhostp
);
3282 if (canputnext(slp
->sl_rq
)) {
3283 if (nmp
= dupmsg(mp
)) {
3284 if ((slp
->sl_flags
& FCIP_SLFAST
) &&
3286 nmp
->b_rptr
+= hdrlen
;
3287 putnext(slp
->sl_rq
, nmp
);
3288 } else if (slp
->sl_flags
& FCIP_SLRAW
) {
3289 /* No headers when FCIP_SLRAW is set */
3290 putnext(slp
->sl_rq
, nmp
);
3291 } else if ((nmp
= fcip_addudind(fptr
, nmp
,
3293 putnext(slp
->sl_rq
, nmp
);
3302 if (canputnext(slp
->sl_rq
)) {
3303 if (slp
->sl_flags
& FCIP_SLFAST
) {
3304 mp
->b_rptr
+= hdrlen
;
3305 putnext(slp
->sl_rq
, mp
);
3306 } else if (slp
->sl_flags
& FCIP_SLRAW
) {
3307 putnext(slp
->sl_rq
, mp
);
3308 } else if ((mp
= fcip_addudind(fptr
, mp
, nhdr
, type
))) {
3309 putnext(slp
->sl_rq
, mp
);
3315 rw_exit(&fcipstruplock
);
3319 * Match the stream based on type and wwn if necessary.
3320 * Destination wwn dhostp is passed to this routine is reserved
3321 * for future usage. We don't need to use it right now since port
3322 * to fcip instance mapping is unique and wwn is already validated when
3323 * packet comes to fcip.
3326 static struct fcipstr
*
3327 fcip_accept(struct fcipstr
*slp
, struct fcip
*fptr
, int type
, la_wwn_t
*dhostp
)
3331 for (; slp
; slp
= slp
->sl_nextp
) {
3333 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM
, (CE_CONT
,
3334 "fcip_accept: checking next sap = %x, type = %x",
3337 if ((slp
->sl_fcip
== fptr
) && (type
== sap
)) {
3345 * Handle DL_UNITDATA_IND messages
3348 fcip_addudind(struct fcip
*fptr
, mblk_t
*mp
, fcph_network_hdr_t
*nhdr
,
3351 dl_unitdata_ind_t
*dludindp
;
3352 struct fcipdladdr
*dlap
;
3356 struct ether_addr src_addr
;
3357 struct ether_addr dest_addr
;
3360 hdrlen
= (sizeof (llc_snap_hdr_t
) + sizeof (fcph_network_hdr_t
));
3361 mp
->b_rptr
+= hdrlen
;
3364 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
3366 size
= sizeof (dl_unitdata_ind_t
) + FCIPADDRL
+ FCIPADDRL
;
3367 if ((nmp
= allocb(size
, BPRI_LO
)) == NULL
) {
3368 fptr
->fcip_allocbfail
++;
3372 DB_TYPE(nmp
) = M_PROTO
;
3373 nmp
->b_wptr
= nmp
->b_datap
->db_lim
;
3374 nmp
->b_rptr
= nmp
->b_wptr
- size
;
3377 * Construct a DL_UNITDATA_IND primitive.
3379 dludindp
= (dl_unitdata_ind_t
*)nmp
->b_rptr
;
3380 dludindp
->dl_primitive
= DL_UNITDATA_IND
;
3381 dludindp
->dl_dest_addr_length
= FCIPADDRL
;
3382 dludindp
->dl_dest_addr_offset
= sizeof (dl_unitdata_ind_t
);
3383 dludindp
->dl_src_addr_length
= FCIPADDRL
;
3384 dludindp
->dl_src_addr_offset
= sizeof (dl_unitdata_ind_t
) + FCIPADDRL
;
3385 dludindp
->dl_group_address
= 0; /* not DL_MULTI */
3387 dlap
= (struct fcipdladdr
*)(nmp
->b_rptr
+ sizeof (dl_unitdata_ind_t
));
3388 wwn_to_ether(&nhdr
->net_dest_addr
, &dest_addr
);
3389 ether_bcopy(&dest_addr
, &dlap
->dl_phys
);
3390 dlap
->dl_sap
= (uint16_t)type
;
3392 dlap
= (struct fcipdladdr
*)(nmp
->b_rptr
+ sizeof (dl_unitdata_ind_t
)
3394 wwn_to_ether(&nhdr
->net_src_addr
, &src_addr
);
3395 ether_bcopy(&src_addr
, &dlap
->dl_phys
);
3396 dlap
->dl_sap
= (uint16_t)type
;
3399 * Link the M_PROTO and M_DATA together.
3407 * The open routine. For clone opens, we return the next available minor
3408 * no. for the stream to use
3412 fcip_open(queue_t
*rq
, dev_t
*devp
, int flag
, int sflag
, cred_t
*credp
)
3414 struct fcipstr
*slp
;
3415 struct fcipstr
**prevslp
;
3418 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
, (CE_NOTE
, "in fcip_open"));
3420 * We need to ensure that the port driver is loaded before
3423 if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER
)) == NULL
) {
3424 /* no port driver instances found */
3425 FCIP_DEBUG(FCIP_DEBUG_STARTUP
, (CE_WARN
,
3426 "!ddi_hold_installed_driver of fp failed\n"));
3429 /* serialize opens */
3430 rw_enter(&fcipstruplock
, RW_WRITER
);
3432 prevslp
= &fcipstrup
;
3433 if (sflag
== CLONEOPEN
) {
3435 for (; (slp
= *prevslp
) != NULL
; prevslp
= &slp
->sl_nextp
) {
3436 if (minor
< slp
->sl_minor
) {
3441 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
, (CE_NOTE
,
3442 "getmajor returns 0x%x", getmajor(*devp
)));
3443 *devp
= makedevice(getmajor(*devp
), minor
);
3445 minor
= getminor(*devp
);
3449 * check if our qp's private area is already initialized. If yes
3450 * the stream is already open - just return
3456 slp
= GETSTRUCT(struct fcipstr
, 1);
3457 slp
->sl_minor
= minor
;
3461 slp
->sl_state
= DL_UNATTACHED
;
3462 slp
->sl_fcip
= NULL
;
3464 mutex_init(&slp
->sl_lock
, NULL
, MUTEX_DRIVER
, NULL
);
3467 * link this new stream entry into list of active streams
3469 slp
->sl_nextp
= *prevslp
;
3472 rq
->q_ptr
= WR(rq
)->q_ptr
= (char *)slp
;
3475 * Disable automatic enabling of our write service procedures
3476 * we need to control this explicitly. This will prevent
3477 * anyone scheduling of our write service procedures.
3482 rw_exit(&fcipstruplock
);
3484 * enable our put and service routines on the read side
3489 * There is only one instance of fcip (instance = 0)
3490 * for multiple instances of hardware
3492 (void) qassociate(rq
, 0); /* don't allow drcompat to be pushed */
3497 * close an opened stream. The minor no. will then be available for
3502 fcip_close(queue_t
*rq
, int flag
, cred_t
*credp
)
3504 struct fcipstr
*slp
;
3505 struct fcipstr
**prevslp
;
3507 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
, (CE_NOTE
, "in fcip_close"));
3509 /* we should also have the active stream pointer in q_ptr */
3512 ddi_rele_driver(ddi_name_to_major(PORT_DRIVER
));
3514 * disable our put and service procedures. We had enabled them
3518 slp
= (struct fcipstr
*)rq
->q_ptr
;
3521 * Implicitly detach stream a stream from an interface.
3527 (void) qassociate(rq
, -1); /* undo association in open */
3529 rw_enter(&fcipstruplock
, RW_WRITER
);
3532 * unlink this stream from the active stream list and free it
3534 for (prevslp
= &fcipstrup
; (slp
= *prevslp
) != NULL
;
3535 prevslp
= &slp
->sl_nextp
) {
3536 if (slp
== (struct fcipstr
*)rq
->q_ptr
) {
3541 /* we should have found slp */
3544 *prevslp
= slp
->sl_nextp
;
3545 mutex_destroy(&slp
->sl_lock
);
3546 kmem_free(slp
, sizeof (struct fcipstr
));
3547 rq
->q_ptr
= WR(rq
)->q_ptr
= NULL
;
3549 rw_exit(&fcipstruplock
);
/*
 * This is not an extension of the DDI_DETACH request. This routine
 * only detaches a stream from an interface.
 *
 * Clears the stream's binding to its fcip instance, drops any
 * promiscuous/allmulti flags (neither is really supported), and marks
 * the stream DL_UNATTACHED.
 *
 * NOTE(review): the loop break, the not-found check, and the trailing
 * fcip_setipq() call were reconstructed from fragments — confirm
 * against the original source.
 */
static void
fcip_dodetach(struct fcipstr *slp)
{
	struct fcipstr	*tslp;
	struct fcip	*fptr;

	FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
	ASSERT(slp->sl_fcip != NULL);

	fptr = slp->sl_fcip;
	slp->sl_fcip = NULL;

	/*
	 * we don't support promiscuous mode currently but check
	 * for and disable any promiscuous mode operation
	 */
	if (slp->sl_flags & SLALLPHYS) {
		slp->sl_flags &= ~SLALLPHYS;
	}

	/*
	 * disable ALLMULTI mode if all mulitcast addr are ON
	 */
	if (slp->sl_flags & SLALLMULTI) {
		slp->sl_flags &= ~SLALLMULTI;
	}

	/*
	 * we are most likely going to perform multicast by
	 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
	 * ALPA 0x00 in case of public loops
	 */

	/*
	 * detach unit from device structure.
	 */
	for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	if (tslp == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
		"fcip_dodeatch - active stream struct not found"));
		/* unregister with Fabric nameserver?? */
	}
	slp->sl_state = DL_UNATTACHED;

	/* recompute the device's IP queue now that this stream is gone */
	fcip_setipq(fptr);
}
/*
 * Set or clear device ipq pointer.
 * Walk thru all the streams on this device, if a ETHERTYPE_IP
 * stream is found, assign device ipq to its sl_rq.
 *
 * The ipq shortcut is only installed when exactly one eligible IP
 * stream exists and no stream is in promiscuous/all-SAP mode;
 * otherwise fcip_ipq is cleared so the slow path is used.
 *
 * NOTE(review): the "ok" disqualification logic inside the loop was
 * reconstructed from fragments — verify against the original source.
 */
static void
fcip_setipq(struct fcip *fptr)
{
	struct fcipstr	*slp;
	int		ok = 1;
	queue_t		*ipq = NULL;

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));

	rw_enter(&fcipstruplock, RW_READER);

	for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
		if (slp->sl_fcip == fptr) {
			/* promiscuous/all-SAP streams disqualify the fastpath */
			if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
				ok = 0;
			}
			if (slp->sl_sap == ETHERTYPE_IP) {
				if (ipq == NULL) {
					ipq = slp->sl_rq;
				} else {
					/* more than one IP stream - no shortcut */
					ok = 0;
				}
			}
		}
	}

	rw_exit(&fcipstruplock);

	if (fcip_check_port_exists(fptr)) {
		/* fptr passed to us is stale */
		return;
	}

	mutex_enter(&fptr->fcip_mutex);
	if (ok) {
		fptr->fcip_ipq = ipq;
	} else {
		fptr->fcip_ipq = NULL;
	}
	mutex_exit(&fptr->fcip_mutex);
}
/*
 * Handle M_IOCTL messages on the write queue: DLIOCRAW switches the
 * stream to raw mode, DL_IOC_HDR_INFO is delegated to
 * fcip_dl_ioc_hdr_info(), everything else is nak'd with EINVAL.
 *
 * NOTE(review): the DLIOCRAW case label and break statements were
 * reconstructed from fragments — confirm against the original source.
 */
static void
fcip_ioctl(queue_t *wq, mblk_t *mp)
{
	struct iocblk		*iocp = (struct iocblk *)mp->b_rptr;
	struct fcipstr		*slp = (struct fcipstr *)wq->q_ptr;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));

	switch (iocp->ioc_cmd) {
	case DLIOCRAW:
		/* raw mode: caller supplies fully formed frames */
		slp->sl_flags |= FCIP_SLRAW;
		miocack(wq, mp, 0, 0);
		break;

	case DL_IOC_HDR_INFO:
		fcip_dl_ioc_hdr_info(wq, mp);
		break;

	default:
		miocnak(wq, mp, 0, EINVAL);
		break;
	}
}
/*
 * The streams 'Put' routine.
 *
 * M_DATA is transmitted directly via fcip_start() when the queue is
 * empty, otherwise queued for fcip_wsrv(). M_PROTO/M_PCPROTO are
 * always queued (see the recursion note below). M_IOCTL and M_FLUSH
 * are handled inline; anything else is dropped.
 *
 * NOTE(review): case labels, qenable()/qreply()/freemsg() calls and
 * the (fptr == NULL) guard were reconstructed from fragments —
 * verify against the original source.
 */
static void
fcip_wput(queue_t *wq, mblk_t *mp)
{
	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
	struct fcip *fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));

	switch (DB_TYPE(mp)) {
	case M_DATA: {

		fptr = slp->sl_fcip;

		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
		    (slp->sl_state != DL_IDLE) ||
		    (fptr == NULL)) {
			/*
			 * set error in the message block and send a reply
			 * back upstream. Sun's merror routine does this
			 * for us more cleanly.
			 */
			merror(wq, mp, EPROTO);
			break;
		}

		/*
		 * if any messages are already enqueued or if the interface
		 * is in promiscuous mode, causing the packets to loop back
		 * up, then enqueue the message. Otherwise just transmit
		 * the message. putq() puts the message on fcip's
		 * write queue and qenable() puts the queue (wq) on
		 * the list of queues to be called by the streams scheduler.
		 */
		if (wq->q_first) {
			(void) putq(wq, mp);
			fptr->fcip_wantw = 1;
			qenable(wq);
		} else if (fptr->fcip_flags & FCIP_PROMISC) {
			/*
			 * Promiscous mode not supported but add this code in
			 * case it will be supported in future.
			 */
			(void) putq(wq, mp);
			qenable(wq);
		} else {
			headerp = (fcph_network_hdr_t *)mp->b_rptr;
			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);

			if (fdestp == NULL) {
				merror(wq, mp, EPROTO);
				break;
			}

			ASSERT(fdestp != NULL);

			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
		}
		break;
	}
	case M_PROTO:
	case M_PCPROTO:
		/*
		 * to prevent recursive calls into fcip_proto
		 * (PROTO and PCPROTO messages are handled by fcip_proto)
		 * let the service procedure handle these messages by
		 * calling putq here.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		break;

	case M_IOCTL:
		fcip_ioctl(wq, mp);
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		/*
		 * we have both FLUSHW and FLUSHR set with FLUSHRW
		 */
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * send msg back upstream. qreply() takes care
			 * of using the RD(wq) queue on its reply
			 */
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
		freemsg(mp);
		break;
	}
}
/*
 * Handle M_PROTO and M_PCPROTO messages.
 *
 * Dispatches on the DLPI primitive under the per-stream sl_lock.
 * Multicast and promiscuous primitives are explicitly rejected with
 * DL_UNSUPPORTED, as is any unrecognized primitive.
 *
 * NOTE(review): the handler calls for the attach/detach/bind/unbind/
 * info/phys-addr cases (fcip_udreq, fcip_areq, fcip_dreq, fcip_breq,
 * fcip_ubreq, fcip_ireq, fcip_pareq) and the case labels for them were
 * reconstructed from the visible debug strings — verify against the
 * original source.
 */
static void
fcip_proto(queue_t *wq, mblk_t *mp)
{
	union DL_primitives	*dlp;
	struct fcipstr		*slp;
	t_uscalar_t		prim;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;
	prim = dlp->dl_primitive;

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitve : %x", prim));

	/* sl_lock serializes DLPI state transitions on this stream */
	mutex_enter(&slp->sl_lock);

	switch (prim) {
	case DL_UNITDATA_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
		fcip_udreq(wq, mp);
		break;

	case DL_ATTACH_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
		fcip_areq(wq, mp);
		break;

	case DL_DETACH_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
		fcip_dreq(wq, mp);
		break;

	case DL_BIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
		fcip_breq(wq, mp);
		break;

	case DL_UNBIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
		fcip_ubreq(wq, mp);
		break;

	case DL_INFO_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
		fcip_ireq(wq, mp);
		break;

	case DL_SET_PHYS_ADDR_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "set phy addr request"));
		fcip_spareq(wq, mp);
		break;

	case DL_PHYS_ADDR_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
		fcip_pareq(wq, mp);
		break;

	case DL_ENABMULTI_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Enable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_DISABMULTI_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Disable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCON_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode ON request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCOFF_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode OFF request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	default:
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;
	}
	mutex_exit(&slp->sl_lock);
}
/*
 * Always enqueue M_PROTO and M_PCPROTO messages pn the wq and M_DATA
 * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
 * require us to hold fcip's internal locks across (upstream) putnext
 * calls. Specifically fcip_intr could hold fcip_intrlock and fcipstruplock
 * when it calls putnext(). That thread could loop back around to call
 * fcip_wput and eventually fcip_init() to cause a recursive mutex panic.
 *
 * M_DATA messages are enqueued only if we are out of xmit resources. Once
 * the transmit resources are available the service procedure is enabled
 * and an attempt is made to xmit all messages on the wq.
 *
 * NOTE(review): the drain-loop control flow (freemsg on lookup failure,
 * early exit when fcip_start requeues) was reconstructed from
 * fragments — verify against the original source.
 */
static void
fcip_wsrv(queue_t *wq)
{
	mblk_t		*mp;
	struct fcipstr	*slp;
	struct fcip	*fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv"));

	while (mp = getq(wq)) {
		switch (DB_TYPE(mp)) {
		case M_DATA:
			if (fptr && mp) {
				headerp = (fcph_network_hdr_t *)mp->b_rptr;
				fdestp = fcip_get_dest(fptr,
				    &headerp->net_dest_addr);
				if (fdestp == NULL) {
					freemsg(mp);
					goto done;
				}
				/* nonzero return means mp was requeued */
				if (fcip_start(wq, mp, fptr, fdestp,
				    KM_SLEEP)) {
					goto done;
				}
			} else {
				freemsg(mp);
			}
			break;

		case M_PROTO:
		case M_PCPROTO:
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_NOTE, "PROT msg in wsrv"));
			fcip_proto(wq, mp);
			break;

		default:
			break;
		}
	}
done:
	;
}
/*
 * This routine is called from fcip_wsrv to send a message downstream
 * on the fibre towards its destination. This routine performs the
 * actual WWN to D_ID mapping by looking up the routing and destination
 * tables.
 *
 * Returns 0 when the packet was handed to the transport successfully,
 * nonzero otherwise (the mblk may have been freed or put back on wq,
 * depending on the failure).
 *
 * NOTE(review): declarations (rval, free, datalen), freemsg/return
 * statements on the early-out paths, and the dequeue bookkeeping after
 * a failed transport were reconstructed from fragments — verify
 * against the original source.
 */
static int
fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
    struct fcip_dest *fdestp, int flags)
{
	int			rval;
	int			free;
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			datalen;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));

	ASSERT(fdestp != NULL);

	/*
	 * Only return if port has gone offline and not come back online
	 * in a while.
	 */
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		freemsg(mp);
		return (1);
	}

	/*
	 * The message block coming in here already has the network and
	 * llc_snap hdr stuffed in.
	 */
	/*
	 * Traditionally ethernet drivers at sun handle 3 cases here -
	 * 1. messages with one mblk
	 * 2. messages with 2 mblks
	 * 3. messages with >2 mblks
	 * For now lets handle all the 3 cases in a single case where we
	 * put them together in one mblk that has all the data
	 */
	if (mp->b_cont != NULL) {
		if (!pullupmsg(mp, -1)) {
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_WARN, "failed to concat message"));
			freemsg(mp);
			return (1);
		}
	}

	datalen = msgsize(mp);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
	    datalen));

	/*
	 * We cannot have requests larger than FCIPMTU+Headers
	 */
	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
	    sizeof (fcph_network_hdr_t))) {
		freemsg(mp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "fcip_pkt_alloc: datalen is larger than "
		    "max possible size."));
		return (1);
	}

	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
	if (fcip_pkt == NULL) {
		/* out of resources - requeue for a later retry */
		(void) putbq(wq, mp);
		return (1);
	}

	fcip_pkt->fcip_pkt_mp = mp;
	fcip_pkt->fcip_pkt_wq = wq;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	mutex_enter(&fdestp->fcipd_mutex);
	/*
	 * If the device dynamically disappeared, just fail the request.
	 */
	if (fdestp->fcipd_rtable == NULL) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
	 */
	fc_pkt->pkt_pd = fdestp->fcipd_pd;

	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/* track the outstanding packet so fcip_pkt_timeout can find it */
	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
	fcip_pkt->fcip_pkt_dest = fdestp;
	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fdestp->fcipd_did, fcip_pkt_callback);

	fdestp->fcipd_ncmds++;

	mutex_exit(&fdestp->fcipd_mutex);
	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
		fptr->fcip_opackets++;
		return (0);
	}

	/* transient failures keep the mblk so it can be retried */
	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
	    rval == FC_TRAN_BUSY) ? 0 : 1;

	mutex_enter(&fdestp->fcipd_mutex);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);

	if (!rval) {
		/* timeout thread already owns it - don't double free */
		fcip_pkt = NULL;
	} else {
		fdestp->fcipd_ncmds--;
	}
	mutex_exit(&fdestp->fcipd_mutex);

	if (fcip_pkt != NULL) {
		fcip_pkt_free(fcip_pkt, free);
	}

	if (!free) {
		(void) putbq(wq, mp);
	}

	return (1);
}
4113 * This routine enqueus a packet marked to be issued to the
4114 * transport in the dest structure. This enables us to timeout any
4115 * request stuck with the FCA/transport for long periods of time
4116 * without a response. fcip_pkt_timeout will attempt to clean up
4117 * any packets hung in this state of limbo.
4120 fcip_fdestp_enqueue_pkt(struct fcip_dest
*fdestp
, fcip_pkt_t
*fcip_pkt
)
4122 ASSERT(mutex_owned(&fdestp
->fcipd_mutex
));
4125 * Just hang it off the head of packet list
4127 fcip_pkt
->fcip_pkt_next
= fdestp
->fcipd_head
;
4128 fcip_pkt
->fcip_pkt_prev
= NULL
;
4129 fcip_pkt
->fcip_pkt_flags
|= FCIP_PKT_IN_LIST
;
4131 if (fdestp
->fcipd_head
!= NULL
) {
4132 ASSERT(fdestp
->fcipd_head
->fcip_pkt_prev
== NULL
);
4133 fdestp
->fcipd_head
->fcip_pkt_prev
= fcip_pkt
;
4136 fdestp
->fcipd_head
= fcip_pkt
;
/*
 * dequeues any packets after the transport/FCA tells us it has
 * been successfully sent on its way. Ofcourse it doesn't mean that
 * the packet will actually reach its destination but its atleast
 * a step closer in that direction.
 *
 * Returns nonzero if the packet was found on (and removed from) the
 * destination's list, 0 otherwise. Caller must hold fcipd_mutex.
 * When the packet is being handled by the timeout thread
 * (FCIP_PKT_IN_TIMEOUT) the list is searched to confirm membership
 * before unlinking; otherwise the packet's own prev/next pointers are
 * trusted directly.
 *
 * NOTE(review): the while-loop scaffolding and break/else framing were
 * reconstructed from fragments — verify against the original source.
 */
static int
fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
{
	fcip_pkt_t	*fcipd_pkt;

	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/* walk the list - the packet may already be off it */
		fcipd_pkt = fdestp->fcipd_head;
		while (fcipd_pkt) {
			if (fcipd_pkt == fcip_pkt) {
				fcip_pkt_t	*pptr = NULL;

				if (fcipd_pkt == fdestp->fcipd_head) {
					ASSERT(fcipd_pkt->fcip_pkt_prev ==
					    NULL);
					fdestp->fcipd_head =
					    fcipd_pkt->fcip_pkt_next;
				} else {
					pptr = fcipd_pkt->fcip_pkt_prev;
					ASSERT(pptr != NULL);
					pptr->fcip_pkt_next =
					    fcipd_pkt->fcip_pkt_next;
				}
				if (fcipd_pkt->fcip_pkt_next) {
					pptr = fcipd_pkt->fcip_pkt_next;
					pptr->fcip_pkt_prev =
					    fcipd_pkt->fcip_pkt_prev;
				}
				fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
				break;
			}
			fcipd_pkt = fcipd_pkt->fcip_pkt_next;
		}
	} else {
		/* fast path: unlink using the packet's own link pointers */
		if (fcip_pkt->fcip_pkt_prev == NULL) {
			ASSERT(fdestp->fcipd_head == fcip_pkt);
			fdestp->fcipd_head = fcip_pkt->fcip_pkt_next;
		} else {
			fcip_pkt->fcip_pkt_prev->fcip_pkt_next =
			    fcip_pkt->fcip_pkt_next;
		}

		if (fcip_pkt->fcip_pkt_next) {
			fcip_pkt->fcip_pkt_next->fcip_pkt_prev =
			    fcip_pkt->fcip_pkt_prev;
		}

		fcipd_pkt = fcip_pkt;
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
	}

	return (fcipd_pkt == fcip_pkt);
}
/*
 * The transport routine - this is the routine that actually calls
 * into the FCA driver (through the transport ofcourse) to transmit a
 * datagram on the fibre. The dest struct assoicated with the port to
 * which the data is intended is already bound to the packet, this routine
 * only takes care of marking the packet a broadcast packet if it is
 * intended to be a broadcast request. This permits the transport to send
 * the packet down on the wire even if it doesn't have an entry for the
 * D_ID in its d_id hash tables.
 *
 * Returns the fc_ulp_transport() result, FC_TRAN_BUSY for a port or
 * route that is temporarily unavailable, or FC_FAILURE.
 *
 * NOTE(review): early-return statements, the fdestp null-check framing
 * and the error-branch else-arm were reconstructed from fragments —
 * verify against the original source.
 */
static int
fcip_transport(fcip_pkt_t *fcip_pkt)
{
	struct fcip		*fptr;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport;
	struct fcip_dest	*fdestp;
	uint32_t		did;
	int			rval = FC_FAILURE;
	struct fcip_routing_table *frp = NULL;

	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fdestp = fcip_pkt->fcip_pkt_dest;
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called"));

	/* packets aimed at the broadcast D_ID must be typed BROADCAST */
	did = fptr->fcip_broadcast_did;
	if (fc_pkt->pkt_cmd_fhdr.d_id == did &&
	    fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "trantype set to BROADCAST"));
		fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
	}

	mutex_enter(&fptr->fcip_mutex);
	if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) &&
	    (fc_pkt->pkt_pd == NULL)) {
		/* unicast with no port device - cannot transmit */
		mutex_exit(&fptr->fcip_mutex);
		return (rval);
	} else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_TRAN_BUSY);
	}
	mutex_exit(&fptr->fcip_mutex);

	if (fdestp) {
		struct fcip_routing_table	*frp;

		frp = fdestp->fcipd_rtable;
		mutex_enter(&fptr->fcip_rt_mutex);
		mutex_enter(&fdestp->fcipd_mutex);
		if (fc_pkt->pkt_pd != NULL) {
			if ((frp == NULL) ||
			    (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) {
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_rt_mutex);
				if (frp &&
				    (frp->fcipr_state == FCIP_RT_INVALID)) {
					return (FC_TRAN_BUSY);
				} else {
					return (rval);
				}
			}
		}
		mutex_exit(&fdestp->fcipd_mutex);
		mutex_exit(&fptr->fcip_rt_mutex);
		ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
	}

	/* Explicitly invalidate this field till fcip decides to use it */
	fc_pkt->pkt_ulp_rscn_infop = NULL;

	rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt);
	if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
		/*
		 * Need to queue up the command for retry
		 */
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_WARN, "ulp_transport failed: 0x%x", rval));
	} else if (rval == FC_LOGINREQ && (frp != NULL)) {
		(void) fcip_do_plogi(fptr, frp);
	} else if (rval == FC_BADPACKET && (frp != NULL)) {
		/*
		 * There is a distinct possiblity in our scheme of things
		 * that we have a routing table entry with a NULL pd struct.
		 * Mark the routing table entry for removal if it is not a
		 * broadcast entry.
		 */
		if ((frp->fcipr_d_id.port_id != 0x0) &&
		    (frp->fcipr_d_id.port_id != 0xffffff)) {
			mutex_enter(&fptr->fcip_rt_mutex);
			frp->fcipr_pd = NULL;
			frp->fcipr_state = PORT_DEVICE_INVALID;
			mutex_exit(&fptr->fcip_rt_mutex);
		}
	}

	return (rval);
}
/*
 * Call back routine. Called by the FCA/transport when the messages
 * has been put onto the wire towards its intended destination. We can
 * now free the fc_packet associated with the message.
 *
 * If the timeout thread currently owns the packet
 * (FCIP_PKT_IN_TIMEOUT) we only mark it returned and let that thread
 * finish the cleanup; otherwise we dequeue it ourselves and free it
 * when the dequeue succeeded.
 */
static void
fcip_pkt_callback(fc_packet_t *fc_pkt)
{
	int			rval;
	fcip_pkt_t		*fcip_pkt;
	struct fcip_dest	*fdestp;

	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	fdestp = fcip_pkt->fcip_pkt_dest;

	/*
	 * take the lock early so that we don't have a race condition
	 * with fcip_timeout.
	 *
	 * fdestp->fcipd_mutex isn't really intended to lock per
	 * packet struct - see bug 5105592 for permanent solution
	 */
	mutex_enter(&fdestp->fcipd_mutex);

	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED;
	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/* timeout thread owns the packet - it will clean up */
		mutex_exit(&fdestp->fcipd_mutex);
		return;
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback"));

	ASSERT(fdestp->fcipd_rtable != NULL);
	ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
	fdestp->fcipd_ncmds--;
	mutex_exit(&fdestp->fcipd_mutex);

	if (rval) {
		fcip_pkt_free(fcip_pkt, 1);
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done"));
}
4349 * Return 1 if the topology is supported, else return 0.
4350 * Topology support is consistent with what the whole
4351 * stack supports together.
4354 fcip_is_supported_fc_topology(int fc_topology
)
4356 switch (fc_topology
) {
4358 case FC_TOP_PRIVATE_LOOP
:
4359 case FC_TOP_PUBLIC_LOOP
:
4360 case FC_TOP_FABRIC
:
/*
 * handle any topology specific initializations here
 * this routine must be called while holding fcip_mutex
 *
 * Installs the broadcast routing entry, bails out on unsupported
 * topologies, primes the routing table from the transport portmap for
 * private loops, and kicks off name-server registration (and optional
 * full discovery) for public loop / fabric.
 *
 * NOTE(review): the FC_TOP_NO_NS case, fallthrough markers, the
 * listlen/alloclen adjustment and switch scaffolding were
 * reconstructed from fragments — verify against the original source.
 */
static void
fcip_handle_topology(struct fcip *fptr)
{
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(mutex_owned(&fptr->fcip_mutex));

	/*
	 * Since we know the port's topology - handle topology
	 * specific details here. In Point to Point and Private Loop
	 * topologies - we would probably not have a name server.
	 */

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x",
	    fport->fcipp_pstate, fport->fcipp_topology));

	fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr);
	/* drop fcip_mutex across the blocking broadcast-entry add */
	mutex_exit(&fptr->fcip_mutex);
	(void) fcip_dest_add_broadcast_entry(fptr, 0);
	mutex_enter(&fptr->fcip_mutex);

	if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)",
		    fptr->fcip_instance, fport->fcipp_topology));
		return;
	}

	switch (fport->fcipp_topology) {
	case FC_TOP_PRIVATE_LOOP: {

		fc_portmap_t		*port_map;
		uint32_t		listlen, alloclen;

		/*
		 * we may have to maintain routing. Get a list of
		 * all devices on this port that the transport layer is
		 * aware of. Check if any of them is a IS8802 type port,
		 * if yes get its WWN and DID mapping and cache it in
		 * the purport routing table. Since there is no
		 * State Change notification for private loop/point_point
		 * topologies - this table may not be accurate. The static
		 * routing table is updated on a state change callback.
		 */
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!"));
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		listlen = alloclen = FCIP_MAX_PORTS;
		port_map = (fc_portmap_t *)
		    kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)),
		    KM_SLEEP);
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
			mutex_exit(&fptr->fcip_mutex);
			fcip_rt_update(fptr, port_map, listlen);
			mutex_enter(&fptr->fcip_mutex);
		}
		if (listlen > alloclen) {
			alloclen = listlen;
		}
		kmem_free(port_map, (alloclen * sizeof (fc_portmap_t)));
		/*
		 * Now fall through and register with the transport
		 * that this port is IP capable
		 */
	}
	/* FALLTHROUGH */
	case FC_TOP_NO_NS:
		/*
		 * If we don't have a nameserver, lets wait until we
		 * have to send out a packet to a remote port and then
		 * try and discover the port using ARP/FARP.
		 */
	/* FALLTHROUGH */
	case FC_TOP_PUBLIC_LOOP:
	case FC_TOP_FABRIC: {
		fc_portmap_t	*port_map;
		uint32_t	listlen, alloclen;

		/* FC_TYPE of 0x05 goes to word 0, LSB */
		fptr->fcip_port_state = FCIP_PORT_ONLINE;

		/* register with the name server via the taskq, once */
		if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) {
			fptr->fcip_flags |= FCIP_REG_INPROGRESS;
			if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns,
			    fptr, KM_NOSLEEP) == 0) {
				fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
			}
		}

		/*
		 * If fcip_create_nodes_on_demand is overridden to force
		 * discovery of all nodes in Fabric/Public loop topologies
		 * we need to query for and obtain all nodes and log into
		 * them as with private loop devices
		 */
		if (!fcip_create_nodes_on_demand) {
			fptr->fcip_port_state = FCIP_PORT_ONLINE;
			listlen = alloclen = FCIP_MAX_PORTS;
			port_map = (fc_portmap_t *)
			    kmem_zalloc((FCIP_MAX_PORTS *
			    sizeof (fc_portmap_t)), KM_SLEEP);
			if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
			    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
				mutex_exit(&fptr->fcip_mutex);
				fcip_rt_update(fptr, port_map, listlen);
				mutex_enter(&fptr->fcip_mutex);
			}
			if (listlen > alloclen) {
				alloclen = listlen;
			}
			kmem_free(port_map,
			    (alloclen * sizeof (fc_portmap_t)));
		}
		break;
	}

	default:
		break;
	}
}
/*
 * Taskq worker: registers the FC IS8802_SNAP (IP) FC-4 type for this
 * port with the fabric name server (NS_RFT_ID), then clears
 * FCIP_REG_INPROGRESS. Bails out early if the instance is detaching,
 * suspended or powering down.
 *
 * NOTE(review): the local declarations (ns_cmd, rfc, types[]) and the
 * early-return were reconstructed from fragments — verify the exact
 * types array size against the original source.
 */
static void
fcip_port_ns(void *arg)
{
	struct fcip		*fptr = (struct fcip *)arg;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	fc_ns_cmd_t		ns_cmd;
	uint32_t		types[8];
	ns_rfc_type_t		rfc;

	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
		mutex_exit(&fptr->fcip_mutex);
		return;
	}
	mutex_exit(&fptr->fcip_mutex);

	/*
	 * Prepare the Name server structure to
	 * register with the transport in case of
	 * Fabric configuration.
	 */
	bzero(&rfc, sizeof (rfc));
	bzero(types, sizeof (types));

	/* set the bit for FC_TYPE_IS8802_SNAP (0x05) in the type map */
	types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 <<
	    FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP));

	rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id;
	bcopy(types, rfc.rfc_types, sizeof (types));

	ns_cmd.ns_flags = 0;
	ns_cmd.ns_cmd = NS_RFT_ID;
	ns_cmd.ns_req_len = sizeof (rfc);
	ns_cmd.ns_req_payload = (caddr_t)&rfc;
	ns_cmd.ns_resp_len = 0;
	ns_cmd.ns_resp_payload = NULL;

	/*
	 * Perform the Name Server Registration for FC IS8802_SNAP Type.
	 * We don't expect a reply for registering port type
	 */
	(void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
	    (opaque_t)0, &ns_cmd);

	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
	mutex_exit(&fptr->fcip_mutex);
}
/*
 * setup this instance of fcip. This routine inits kstats, allocates
 * unsolicited buffers, determines' this port's siblings and handles
 * topology specific details which includes registering with the name
 * server and also setting up the routing table for this port for
 * private loops and point to point topologies.
 *
 * Called with fcip_mutex NOT held; takes it for the duration and uses
 * a goto-done cleanup path that undoes kstats, caches and unsolicited
 * buffers on failure. Returns FC_SUCCESS or FC_FAILURE.
 *
 * NOTE(review): the rval assignments on error paths, the switch on the
 * uballoc result, and portions of the done: cleanup path were
 * reconstructed from fragments — verify against the original source.
 */
static int
fcip_init_port(struct fcip *fptr)
{
	int rval = FC_SUCCESS;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	static char buf[64];
	size_t	tok_buf_size = 0;

	ASSERT(fport != NULL);

	mutex_enter(&fptr->fcip_mutex);

	/*
	 * setup mac address for this port. Don't be too worried if
	 * the WWN is zero, there is probably nothing attached to
	 * to the port. There is no point allocating unsolicited buffers
	 * for an unused port so return success if we don't have a MAC
	 * address. Do the port init on a state change notification.
	 */
	if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) {
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_SUCCESS;
		goto done;
	}

	/*
	 * clear routing table hash list for this port
	 */
	fcip_rt_flush(fptr);

	/*
	 * init kstats for this instance
	 */
	fcip_kstat_init(fptr);

	/*
	 * Allocate unsolicited buffers
	 */
	fptr->fcip_ub_nbufs = fcip_ub_nbufs;
	tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs;

	FCIP_DEBUG(FCIP_DEBUG_INIT,
	    (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size));

	fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP);

	if (fptr->fcip_ub_tokens == NULL) {
		rval = FC_FAILURE;
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): failed to allocate unsol buf",
		    fptr->fcip_instance));
		goto done;
	}
	rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs,
	    fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens);

	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!",
		    fptr->fcip_instance, rval));
	}

	switch (rval) {
	case FC_SUCCESS:
		break;

	case FC_OFFLINE:
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_FAILURE;
		goto done;

	case FC_UB_ERROR:
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "invalid ub alloc request !!"));
		rval = FC_FAILURE;
		goto done;

	default:
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "ub alloc request failed !!"));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * requested bytes could not be alloced
	 */
	if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) {
		cmn_err(CE_WARN,
		    "!fcip(0x%x): Failed to alloc unsolicited bufs",
		    ddi_get_instance(fport->fcipp_dip));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * Preallocate a Cache of fcip packets for transmit and receive
	 * We don't want to be holding on to unsolicited buffers while
	 * we transmit the message upstream
	 */
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache"));

	(void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance);
	fptr->fcip_xmit_cache = kmem_cache_create(buf,
	    (fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)),
	    8, fcip_cache_constructor, fcip_cache_destructor,
	    NULL, (void *)fport, NULL, 0);

	(void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance);
	fptr->fcip_sendup_cache = kmem_cache_create(buf,
	    sizeof (struct fcip_sendup_elem),
	    8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0);

	if (fptr->fcip_xmit_cache == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip%d unable to allocate xmit cache",
		    fptr->fcip_instance));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * We may need to handle routing tables for point to point and
	 * fcal topologies and register with NameServer for Fabric
	 * topologies.
	 */
	fcip_handle_topology(fptr);
	mutex_exit(&fptr->fcip_mutex);
	if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x):add broadcast entry failed!!",
		    fptr->fcip_instance));
		mutex_enter(&fptr->fcip_mutex);
		rval = FC_FAILURE;
		goto done;
	}

	return (FC_SUCCESS);

done:
	/*
	 * we don't always come here from port_attach - so cleanup
	 * anything done in the init_port routine
	 */
	if (fptr->fcip_kstatp) {
		kstat_delete(fptr->fcip_kstatp);
		fptr->fcip_kstatp = NULL;
	}

	if (fptr->fcip_xmit_cache) {
		kmem_cache_destroy(fptr->fcip_xmit_cache);
		fptr->fcip_xmit_cache = NULL;
	}

	if (fptr->fcip_sendup_cache) {
		kmem_cache_destroy(fptr->fcip_sendup_cache);
		fptr->fcip_sendup_cache = NULL;
	}

	/* release unsolicited buffers */
	if (fptr->fcip_ub_tokens) {
		uint64_t *tokens = fptr->fcip_ub_tokens;
		fptr->fcip_ub_tokens = NULL;

		mutex_exit(&fptr->fcip_mutex);
		(void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs,
		    tokens);
		kmem_free(tokens, tok_buf_size);
	} else {
		mutex_exit(&fptr->fcip_mutex);
	}

	return (rval);
}
/*
 * Sets up a port's MAC address from its WWN.
 *
 * Per RFC 2625 the MAC address is derived from the node WWN (the low
 * six bytes). Returns FC_SUCCESS when a non-zero WWN was available,
 * FCIP_INVALID_WWN otherwise. Caller must hold fcip_mutex.
 */
static int
fcip_setup_mac_addr(struct fcip *fptr)
{
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(mutex_owned(&fptr->fcip_mutex));

	fptr->fcip_addrflags = 0;

	/*
	 * we cannot choose a MAC address for our interface - we have
	 * to live with whatever node WWN we get (minus the top two
	 * MSbytes for the MAC address) from the transport layer. We will
	 * treat the WWN as our factory MAC address.
	 */

	if ((fport->fcipp_nwwn.w.wwn_hi != 0) ||
	    (fport->fcipp_nwwn.w.wwn_lo != 0)) {
		char	etherstr[ETHERSTRL];

		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_NOTE, "setupmacaddr ouraddr %s", etherstr));

		/*
		 * NOTE(review): FCIP_FACTADDR_USE as the second flag was
		 * reconstructed - the line carrying it was lost in
		 * extraction; confirm against the original source.
		 */
		fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT |
		    FCIP_FACTADDR_USE);
	} else {
		/*
		 * No WWN - just return failure - there's not much
		 * we can do since we cannot set the WWN.
		 */
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "Port does not have a valid WWN"));
		return (FCIP_INVALID_WWN);
	}

	return (FC_SUCCESS);
}
/*
 * flush routing table entries
 *
 * Frees every entry in every hash bucket of the per-port routing
 * table under fcip_rt_mutex and NULLs out the bucket heads.
 */
static void
fcip_rt_flush(struct fcip *fptr)
{
	int index;

	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		struct fcip_routing_table	*frtp, *frtp_next;

		frtp = fptr->fcip_rtable[index];
		/* save the next pointer before freeing each node */
		while (frtp) {
			frtp_next = frtp->fcipr_next;
			kmem_free(frtp, sizeof (struct fcip_routing_table));
			frtp = frtp_next;
		}
		fptr->fcip_rtable[index] = NULL;
	}
	mutex_exit(&fptr->fcip_rt_mutex);
}
/*
 * Free up the fcip softstate and all allocated resources for the
 * fcip instance assoicated with a given port driver instance.
 *
 * Given that the list of structures pointed to by fcip_port_head,
 * this function is called from multiple sources, and the
 * fcip_global_mutex that protects fcip_port_head must be dropped,
 * our best solution is to return a value that indicates the next
 * port in the list. This way the caller doesn't need to worry
 * about the race condition where it saves off a pointer to the
 * next structure in the list and by the time this routine returns,
 * that next structure has already been freed.
 *
 * NOTE(review): the if (fport)/if (fptr) framing, the taskq_destroy
 * call, the phandle guard around fc_ulp_ubfree and the list-walk
 * loop were reconstructed from fragments — verify against the
 * original source.
 */
static fcip_port_info_t *
fcip_softstate_free(fcip_port_info_t *fport)
{
	struct fcip		*fptr = NULL;
	int			instance;
	timeout_id_t		tid;
	opaque_t		phandle = NULL;
	fcip_port_info_t	*prev_fport, *cur_fport, *next_fport = NULL;

	ASSERT(MUTEX_HELD(&fcip_global_mutex));

	if (fport) {
		phandle = fport->fcipp_handle;
		fptr = fport->fcipp_fcip;
	} else {
		return (next_fport);
	}

	if (fptr) {
		mutex_enter(&fptr->fcip_mutex);
		instance = ddi_get_instance(fptr->fcip_dip);

		/*
		 * dismantle timeout thread for this instance of fcip
		 */
		tid = fptr->fcip_timeout_id;
		fptr->fcip_timeout_id = NULL;

		/* drop the lock - untimeout may wait for the handler */
		mutex_exit(&fptr->fcip_mutex);
		(void) untimeout(tid);
		mutex_enter(&fptr->fcip_mutex);

		ASSERT(fcip_num_instances >= 0);
		fcip_num_instances--;

		/*
		 * stop sendup thread
		 */
		mutex_enter(&fptr->fcip_sendup_mutex);
		if (fptr->fcip_sendup_thr_initted) {
			fptr->fcip_sendup_thr_initted = 0;
			cv_signal(&fptr->fcip_sendup_cv);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
		}
		ASSERT(fptr->fcip_sendup_head == NULL);
		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		/*
		 * dismantle the taskq
		 */
		if (fptr->fcip_tq) {
			taskq_t	*tq = fptr->fcip_tq;

			fptr->fcip_tq = NULL;

			mutex_exit(&fptr->fcip_mutex);
			taskq_destroy(tq);
			mutex_enter(&fptr->fcip_mutex);
		}

		if (fptr->fcip_kstatp) {
			kstat_delete(fptr->fcip_kstatp);
			fptr->fcip_kstatp = NULL;
		}

		/* flush the routing table entries */
		fcip_rt_flush(fptr);

		if (fptr->fcip_xmit_cache) {
			kmem_cache_destroy(fptr->fcip_xmit_cache);
			fptr->fcip_xmit_cache = NULL;
		}

		if (fptr->fcip_sendup_cache) {
			kmem_cache_destroy(fptr->fcip_sendup_cache);
			fptr->fcip_sendup_cache = NULL;
		}

		fcip_cleanup_dest(fptr);

		/* release unsolicited buffers */
		if (fptr->fcip_ub_tokens) {
			uint64_t	*tokens = fptr->fcip_ub_tokens;

			fptr->fcip_ub_tokens = NULL;
			mutex_exit(&fptr->fcip_mutex);
			if (phandle) {
				/*
				 * release the global mutex here to
				 * permit any data pending callbacks to
				 * complete. Else we will deadlock in the
				 * FCA waiting for all unsol buffers to be
				 * returned.
				 */
				mutex_exit(&fcip_global_mutex);
				(void) fc_ulp_ubfree(phandle,
				    fptr->fcip_ub_nbufs, tokens);
				mutex_enter(&fcip_global_mutex);
			}

			kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs));
		} else {
			mutex_exit(&fptr->fcip_mutex);
		}

		mutex_destroy(&fptr->fcip_mutex);
		mutex_destroy(&fptr->fcip_ub_mutex);
		mutex_destroy(&fptr->fcip_rt_mutex);
		mutex_destroy(&fptr->fcip_dest_mutex);
		mutex_destroy(&fptr->fcip_sendup_mutex);
		cv_destroy(&fptr->fcip_farp_cv);
		cv_destroy(&fptr->fcip_sendup_cv);
		cv_destroy(&fptr->fcip_ub_cv);

		ddi_soft_state_free(fcip_softp, instance);
	}

	/*
	 * Now dequeue the fcip_port_info from the port list
	 */
	cur_fport = fcip_port_head;
	prev_fport = NULL;
	while (cur_fport != NULL) {
		if (cur_fport == fport) {
			break;
		}
		prev_fport = cur_fport;
		cur_fport = cur_fport->fcipp_next;
	}

	/*
	 * Assert that we found a port in our port list
	 */
	ASSERT(cur_fport == fport);

	if (prev_fport) {
		/*
		 * Not the first port in the port list
		 */
		prev_fport->fcipp_next = fport->fcipp_next;
	} else {
		/*
		 * first port in the list
		 */
		fcip_port_head = fport->fcipp_next;
	}
	next_fport = fport->fcipp_next;
	kmem_free(fport, sizeof (fcip_port_info_t));

	return (next_fport);
}
/*
 * This is called by transport for any ioctl operations performed
 * on the devctl or other transport minor nodes. It is currently
 * unused by fcip: every ioctl is left for some other ULP to claim.
 *
 * NOTE(review): this extract omits some original lines (return type,
 * the tail of the parameter list, braces); confirm against the
 * complete source.
 */
fcip_port_ioctl(opaque_t ulp_handle, opaque_t port_handle, dev_t dev,
    int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
	/* fcip does not handle transport ioctls; let another ULP claim it */
	return (FC_UNCLAIMED);
/*
 * DL_INFO_REQ - returns information about the DLPI stream to the DLS user
 * requesting information about this interface.
 *
 * wq: write-side STREAMS queue the request arrived on
 * mp: the DL_INFO_REQ message, exchanged in place for a DL_INFO_ACK
 *
 * NOTE(review): this extract omits some original lines (storage class,
 * declarations for fptr/ep/size, braces, returns, the final qreply);
 * the control flow shown below is incomplete -- confirm against the
 * complete source.
 */
fcip_ireq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	dl_info_ack_t		*dlip;
	struct fcipdladdr	*dlap;
	char			etherstr[ETHERSTRL];

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;

	FCIP_DEBUG(FCIP_DEBUG_DLPI,
	    (CE_NOTE, "fcip_ireq: info request req rcvd"));

	/* reject messages too short to hold a DL_INFO_REQ */
	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
		dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);

	/*
	 * Exchange current message for a DL_INFO_ACK
	 * (ack carries the local and broadcast addresses after the fixed part)
	 */
	size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL;
	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {

	/*
	 * FILL in the DL_INFO_ACK fields and reply
	 */
	dlip = (dl_info_ack_t *)mp->b_rptr;
	*dlip = fcip_infoack;	/* start from the template ack, then patch */
	dlip->dl_current_state = slp->sl_state;
	dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset);
	dlap->dl_sap = slp->sl_sap;

	/* attached case: report the port's MAC (derived from the PWWN) */
	fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
	FCIP_DEBUG(FCIP_DEBUG_DLPI,
	    (CE_NOTE, "ireq - our mac: %s", etherstr));
	ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys);
	/* presumably the else arm (stream not attached): zero address */
	bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);

	/* broadcast address lives at dl_brdcst_addr_offset in the ack */
	ep = (la_wwn_t *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
	ether_bcopy(&fcip_arpbroadcast_addr, ep);

	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req.."));
/*
 * To handle DL_UNITDATA_REQ requests: validate the destination address,
 * resolve it to a remote-port destination structure, prepend the RFC 2625
 * network header plus LLC/SNAP header to the payload, and hand the message
 * to fcip_start() for transmission.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/wwn/nmp/hdr_size, braces, returns, else arms); control flow shown
 * below is incomplete -- confirm against the complete source.
 */
fcip_udreq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	fcip_port_info_t	*fport;
	dl_unitdata_req_t	*dludp;
	struct fcipdladdr	*dlap;
	fcph_network_hdr_t	*headerp;
	llc_snap_hdr_t		*lsnap;
	t_uscalar_t		off, len;
	struct fcip_dest	*fdestp;

	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq"));

	slp = (struct fcipstr *)wq->q_ptr;

	/* unitdata is only legal on a bound (DL_IDLE) stream */
	if (slp->sl_state != DL_IDLE) {
		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);

	fptr = slp->sl_fcip;
	/* presumably guarded by a NULL-fptr (unattached) check */
		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);

	fport = fptr->fcip_port_info;

	dludp = (dl_unitdata_req_t *)mp->b_rptr;
	off = dludp->dl_dest_addr_offset;
	len = dludp->dl_dest_addr_length;

	/*
	 * Validate destination address format
	 */
	if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) {
		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0);

	/*
	 * Error if no M_DATA follows
	 */
		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);

	dlap = (struct fcipdladdr *)(mp->b_rptr + off);

	/*
	 * Now get the destination structure for the remote NPORT
	 * (MAC address is the low 6 bytes of the remote PWWN per RFC 2625)
	 */
	ether_to_wwn(&dlap->dl_phys, &wwn);
	fdestp = fcip_get_dest(fptr, &wwn);

	if (fdestp == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE,
		    "udreq - couldn't find dest struct for remote port"));
		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);

	/*
	 * Network header + SAP
	 */
	hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);

	/* DB_REF gives the no. of msgs pointing to this block */
	/* fast path: single-ref data block with headroom and 2-byte alignment */
	if ((DB_REF(nmp) == 1) &&
	    (MBLKHEAD(nmp) >= hdr_size) &&
	    (((uintptr_t)mp->b_rptr & 0x1) == 0)) {

		nmp->b_rptr -= hdr_size;	/* grow headers in place */

		/* first put the network header */
		headerp = (fcph_network_hdr_t *)nmp->b_rptr;
		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
			/* presumably else arm: unicast destination */
			ether_to_wwn(&dlap->dl_phys, &wwn);
		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
		ether_to_wwn(&fptr->fcip_macaddr, &wwn);
		bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t));

		/* Now the snap header */
		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
		    sizeof (fcph_network_hdr_t));
		lsnap->oui[0] = 0x00;
		lsnap->oui[1] = 0x00; /* 80 */
		lsnap->oui[2] = 0x00; /* C2 */
		lsnap->pid = BE_16((dlap->dl_sap));

		/* slow path: build headers in a freshly allocated mblk */
		DB_TYPE(mp) = M_DATA;
		headerp = (fcph_network_hdr_t *)mp->b_rptr;

		/*
		 * Only fill in the low 48bits of WWN for now - we can
		 * fill in the NAA_ID after we find the port in the
		 * routing table
		 */
		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
			/* presumably else arm: unicast destination */
			ether_to_wwn(&dlap->dl_phys, &wwn);
		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
		/* need to send our PWWN */
		bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr,

		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
		    sizeof (fcph_network_hdr_t));
		lsnap->oui[0] = 0x00;
		lsnap->oui[1] = 0x00;
		lsnap->oui[2] = 0x00;
		lsnap->pid = BE_16(dlap->dl_sap);

		mp->b_wptr = mp->b_rptr + hdr_size;

	/*
	 * Ethernet drivers have a lot of gunk here to put the Type
	 * information (for Ethernet encapsulation (RFC 894) or the
	 * Length (for 802.2/802.3) - I guess we'll just ignore that
	 * here.
	 */

	/*
	 * Start the I/O on this port. If fcip_start failed for some reason
	 * we call putbq in fcip_start so we don't need to check the
	 * return value from fcip_start
	 */
	(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
/*
 * DL_ATTACH_REQ: attaches a PPA with a stream. ATTACH requests are needed
 * for style 2 DLS providers to identify the physical medium through which
 * the streams communication will happen.
 *
 * Validates the requested PPA against the list of fcip port instances,
 * optionally waits for a late port-attach callback, then links the stream
 * to the instance and moves it to DL_UNBOUND.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/ppa, braces, returns, continue/break statements); control flow shown
 * below is incomplete -- confirm against the complete source.
 */
fcip_areq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	fcip_port_info_t	*fport;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;

	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);

	/* can only attach an unattached stream */
	if (slp->sl_state != DL_UNATTACHED) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);

	ppa = dlp->attach_req.dl_ppa;
	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa));

	/*
	 * check if the PPA is valid
	 */
	mutex_enter(&fcip_global_mutex);

	/* walk the global port list looking for a matching instance number */
	for (fport = fcip_port_head; fport; fport = fport->fcipp_next) {
		if ((fptr = fport->fcipp_fcip) == NULL) {

		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa,
		    ddi_get_instance(fptr->fcip_dip)));

		if (ppa == ddi_get_instance(fptr->fcip_dip)) {
			FCIP_DEBUG(FCIP_DEBUG_DLPI,
			    (CE_NOTE, "ppa found %x", ppa));

	if (fport == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "dlerrorack coz fport==NULL"));

		mutex_exit(&fcip_global_mutex);

		/* no such transport port at all -> bad PPA */
		if (fc_ulp_get_port_handle(ppa) == NULL) {
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);

		/*
		 * Wait for Port attach callback to trigger. If port_detach
		 * got in while we were waiting, then ddi_get_soft_state
		 * will return NULL, and we'll return error.
		 */
		delay(drv_usectohz(FCIP_INIT_DELAY));
		mutex_enter(&fcip_global_mutex);

		fptr = ddi_get_soft_state(fcip_softp, ppa);
		/* presumably guarded by a NULL-fptr check */
			mutex_exit(&fcip_global_mutex);
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);

	/*
	 * set link to device and update our state
	 */
	slp->sl_fcip = fptr;
	slp->sl_state = DL_UNBOUND;

	mutex_exit(&fcip_global_mutex);

	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet"));
	mutex_exit(&fptr->fcip_mutex);

	dlokack(wq, mp, DL_ATTACH_REQ);
/*
 * DL_DETACH request - detaches a PPA from a stream.
 *
 * Valid only while the stream is attached but unbound (DL_UNBOUND).
 *
 * NOTE(review): this extract omits some original lines (return type,
 * braces, returns, and the detach action itself between the state check
 * and the ok-ack); confirm against the complete source.
 */
fcip_dreq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;

	slp = (struct fcipstr *)wq->q_ptr;

	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
		dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);

	/* must be attached-but-unbound to detach */
	if (slp->sl_state != DL_UNBOUND) {
		dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);

	dlokack(wq, mp, DL_DETACH_REQ);
/*
 * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream.
 * DLS users communicate with a physical interface through DLSAPs. Multiple
 * DLSAPs can be bound to the same stream (PPA).
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/sap/xidtest, braces, returns, the xidtest check condition, and the
 * sl_sap assignment); control flow shown below is incomplete -- confirm
 * against the complete source.
 */
fcip_breq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	struct fcipdladdr	fcipaddr;

	slp = (struct fcipstr *)wq->q_ptr;

	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);

	/* can only bind an attached, unbound stream */
	if (slp->sl_state != DL_UNBOUND) {
		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);

	dlp = (union DL_primitives *)mp->b_rptr;
	fptr = slp->sl_fcip;

	/* presumably guarded by a NULL-fptr (unattached) check */
		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);

	sap = dlp->bind_req.dl_sap;
	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap));
	xidtest = dlp->bind_req.dl_xidtest_flg;

	/* automatic XID/TEST handling is not supported */
		dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);

	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap));

	/* SAP doubles as the Ethernet type; bound above by ETHERTYPE_MAX */
	if (sap > ETHERTYPE_MAX) {
		dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);

	/*
	 * save SAP for this stream and change the link state
	 */
	slp->sl_state = DL_IDLE;

	fcipaddr.dl_sap = sap;
	ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys);
	dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0);
/*
 * DL_UNBIND request to unbind a previously bound DLSAP, from this stream.
 *
 * Moves the stream back to DL_UNBOUND, flushes both queue directions
 * upstream, acks the request, and recomputes the cached IP queue.
 *
 * NOTE(review): this extract omits some original lines (return type,
 * braces, returns, sl_sap reset); confirm against the complete source.
 */
fcip_ubreq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;

	slp = (struct fcipstr *)wq->q_ptr;

	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);

	/* only a bound (DL_IDLE) stream can be unbound */
	if (slp->sl_state != DL_IDLE) {
		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);

	slp->sl_state = DL_UNBOUND;

	/* flush pending data in both directions before acking */
	(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
	dlokack(wq, mp, DL_UNBIND_REQ);

	/* re-evaluate which stream (if any) gets inbound IP traffic */
	fcip_setipq(slp->sl_fcip);
/*
 * Return our physical address (DL_PHYS_ADDR_REQ).
 *
 * Both the factory and the current address are derived from FC WWNs:
 * the factory address from the port's PWWN, the current one from the
 * cached fcip_macaddr.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/type, braces, returns, the switch statement head, break/default
 * labels); control flow shown below is incomplete -- confirm against the
 * complete source.
 */
fcip_pareq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	fcip_port_info_t	*fport;
	struct ether_addr	addr;

	slp = (struct fcipstr *)wq->q_ptr;

	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);

	dlp = (union DL_primitives *)mp->b_rptr;
	type = dlp->physaddr_req.dl_addr_type;
	fptr = slp->sl_fcip;

	/* presumably guarded by a NULL-fptr (unattached) check */
		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);

	fport = fptr->fcip_port_info;

	/* dispatch on requested address type (switch head elided) */
	case DL_FACT_PHYS_ADDR:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "returning factory phys addr"));
		/* factory address == low bytes of the port WWN */
		wwn_to_ether(&fport->fcipp_pwwn, &addr);

	case DL_CURR_PHYS_ADDR:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "returning current phys addr"));
		ether_bcopy(&fptr->fcip_macaddr, &addr);

		/* default arm: unknown address type */
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Not known cmd type in phys addr"));
		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);

	dlphysaddrack(wq, mp, &addr, ETHERADDRL);
/*
 * Set physical address DLPI request (DL_SET_PHYS_ADDR_REQ).
 *
 * Validates the proposed address, asks fcip_set_wwn() to change the
 * port WWN accordingly, and on success re-registers the new Port WWN
 * with the transport/nameserver via an NS_RPN_ID nameserver command.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/wwn, braces, returns, else arms); control flow shown below is
 * incomplete -- confirm against the complete source. In particular,
 * fcip_set_wwn() currently always fails, so the error arm is the one
 * normally taken.
 */
fcip_spareq(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	t_uscalar_t		off, len;
	struct ether_addr	*addrp;
	fc_ns_cmd_t		fcip_ns_cmd;

	slp = (struct fcipstr *)wq->q_ptr;

	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);

	dlp = (union DL_primitives *)mp->b_rptr;
	len = dlp->set_physaddr_req.dl_addr_length;
	off = dlp->set_physaddr_req.dl_addr_offset;

	/* address must lie entirely within the message */
	if (!MBLKIN(mp, off, len)) {
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);

	addrp = (struct ether_addr *)(mp->b_rptr + off);

	/*
	 * If the length of physical address is not correct or address
	 * specified is a broadcast address or multicast addr -
	 * return an error.
	 */
	if ((len != ETHERADDRL) ||
	    ((addrp->ether_addr_octet[0] & 01) == 1) ||
	    (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) {
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);

	/*
	 * check if a stream is attached to this device. Else return an error
	 */
	if ((fptr = slp->sl_fcip) == NULL) {
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);

	/*
	 * set the new interface local address. We request the transport
	 * layer to change the Port WWN for this device - return an error
	 * if we don't succeed.
	 */
	ether_to_wwn(addrp, &wwn);
	if (fcip_set_wwn(&wwn) == FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_WARN, "WWN changed in spareq"));
		/* presumably the else arm: WWN change refused */
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);

	/*
	 * register The new Port WWN and Node WWN with the transport
	 * and Nameserver. Hope the transport ensures all current I/O
	 * has stopped before actually attempting to register a new
	 * port and Node WWN else we are hosed. Maybe a Link reset
	 * will get everyone's attention.
	 */
	fcip_ns_cmd.ns_flags = 0;
	fcip_ns_cmd.ns_cmd = NS_RPN_ID;		/* register Port Name */
	fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t);
	fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0];
	fcip_ns_cmd.ns_resp_len = 0;
	fcip_ns_cmd.ns_resp_payload = (caddr_t)0;
	if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
	    (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_WARN, "setting Port WWN failed"));
		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);

	dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
/*
 * change our port's WWN if permitted by hardware.
 *
 * Currently a stub: always refuses the change (returns FC_FAILURE).
 *
 * NOTE(review): return type line and braces are missing from this extract.
 */
fcip_set_wwn(la_wwn_t *pwwn)
	/*
	 * We're usually not allowed to change the WWN of adapters
	 * but some adapters do permit us to change the WWN - don't
	 * permit setting of WWNs (yet?) - This behavior could be
	 * modified if needed
	 */
	return (FC_FAILURE);
/*
 * This routine fills in the header for fastpath data requests. What this
 * does in simple terms is, instead of sending all data through the Unitdata
 * request dlpi code paths (which will then append the protocol specific
 * header - network and snap headers in our case), the upper layers issue
 * a M_IOCTL with a DL_IOC_HDR_INFO request and ask the streams endpoint
 * driver to give the header it needs appended and the upper layer
 * allocates and fills in the header and calls our put routine.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * fptr/wwn/nmp/hdrlen/error, braces, returns, else arms, and the linkb
 * call that attaches the header mblk); control flow shown below is
 * incomplete -- confirm against the complete source.
 */
fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
	struct fcipstr		*slp;
	struct fcipdladdr	*dlap;
	dl_unitdata_req_t	*dlup;
	fcph_network_hdr_t	*headerp;
	llc_snap_hdr_t		*lsnap;
	fcip_port_info_t	*fport;
	t_uscalar_t		off, len;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;
	/* presumably guarded by a NULL-fptr (unattached) check */
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL1"));
		miocnak(wq, mp, 0, EINVAL);

	/* pull the DL_UNITDATA_REQ + address into one contiguous block */
	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns %d", error));
		miocnak(wq, mp, 0, error);

	fport = fptr->fcip_port_info;

	/*
	 * check if the DL_UNITDATA_REQ destination addr has valid offset
	 * and length values
	 */
	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
	off = dlup->dl_dest_addr_offset;
	len = dlup->dl_dest_addr_length;
	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL2"));
		miocnak(wq, mp, 0, EINVAL);

	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);

	/*
	 * Allocate a new mblk to hold the ether header
	 */

	/*
	 * setup space for network header
	 */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns ENOMEM"));
		miocnak(wq, mp, 0, ENOMEM);

	nmp->b_wptr += hdrlen;

	/*
	 * Fill in the Network Hdr and LLC SNAP header;
	 */
	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
	/*
	 * just fill in the Node WWN here - we can fill in the NAA_ID when
	 * we search the routing table
	 */
	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
		/* presumably else arm: unicast destination */
		ether_to_wwn(&dlap->dl_phys, &wwn);
	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));

	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
	lsnap->oui[0] = 0x00;
	lsnap->oui[1] = 0x00;
	lsnap->oui[2] = 0x00;
	lsnap->pid = BE_16(dlap->dl_sap);

	/*
	 * Link new mblk in after the "request" mblks.
	 */

	/* mark this stream as fastpath-capable from now on */
	slp->sl_flags |= FCIP_SLFAST;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "dliochdr : returns success "));
	miocack(wq, mp, msgsize(mp->b_cont), 0);
/*
 * Establish a kmem cache for fcip packets: kmem cache constructor.
 *
 * buf:   the raw cache object (an fcip_pkt_t followed by FCA private space)
 * arg:   the fcip_port_info_t this cache belongs to
 * flags: KM_SLEEP / KM_NOSLEEP, mapped onto the DMA callback policy
 *
 * Returns FCIP_SUCCESS, or FCIP_FAILURE if the DMA handle cannot be
 * allocated.
 *
 * NOTE(review): this extract omits some original lines (return type,
 * declaration of fptr, braces); confirm against the complete source.
 */
fcip_cache_constructor(void *buf, void *arg, int flags)
	fcip_pkt_t		*fcip_pkt = buf;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
	int			(*cb) (caddr_t);

	/* sleep in the DMA allocation only if the caller may sleep */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	/*
	 * we allocated space for our private area at the end of the
	 * fc packet. Make sure we point to it correctly. Ideally we
	 * should just push fc_packet_private to the beginning or end
	 * of the fc_packet structure
	 */
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_fptr = fptr;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fc_pkt->pkt_ulp_rscn_infop = NULL;

	/*
	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
	 * any responses for outbound IP data so no need to setup
	 * response or data dma handles.
	 */
	if (ddi_dma_alloc_handle(fport->fcipp_dip,
	    &fport->fcipp_cmd_dma_attr, cb, NULL,
	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FCIP_FAILURE);

	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
	/* FCA private data lives immediately after the fcip_pkt_t */
	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
	    sizeof (fcip_pkt_t));
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
	    fc_pkt->pkt_data_cookie_cnt = 0;
	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
	    fc_pkt->pkt_data_cookie = NULL;

	return (FCIP_SUCCESS);
/*
 * destroy the fcip kmem cache: kmem cache destructor.
 *
 * Releases the command DMA handle allocated by fcip_cache_constructor().
 *
 * NOTE(review): this extract omits some original lines (return type,
 * declaration of fptr, braces); confirm against the complete source.
 */
fcip_cache_destructor(void *buf, void *arg)
	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)buf;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	/* the object must still belong to the instance that built it */
	ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	if (fc_pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
/*
 * the fcip destination structure is hashed on Node WWN assuming
 * a NAA_ID of 0x1 (IEEE).
 *
 * Resolve a remote port WWN to a struct fcip_dest: first search the
 * active dest hash table, then the routing table; if neither has a
 * usable entry, query the transport (trying IEEE Format 1 and the two
 * QLogic Format 2 WWN variants), and finally fall back to FARP on
 * fabric / point-to-point topologies.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * twwn/pd/rval/map/hash_bucket, braces, returns, break statements, and the
 * trailing arguments of the fc_ulp_get_remote_port and fcip_do_farp calls);
 * control flow shown below is incomplete -- confirm against the complete
 * source.
 */
static struct fcip_dest *
fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
	struct fcip_dest		*fdestp = NULL;
	fcip_port_info_t		*fport;
	struct fcip_routing_table	*frp;
	uint32_t			*twwnp = (uint32_t *)&twwn;

	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	if (fcip_check_port_exists(fptr)) {
		/* fptr is stale, return fdestp */

	fport = fptr->fcip_port_info;

	/*
	 * First check if we have active I/Os going on with the
	 * destination port (an entry would exist in fcip_dest hash table)
	 */
	mutex_enter(&fptr->fcip_dest_mutex);
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		if (fdestp->fcipd_rtable) {
			/* match on Node WWN (NAA_ID assumed IEEE) */
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_NWWN) == 0) {
				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
				    (CE_NOTE, "found fdestp"));
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;
	mutex_exit(&fptr->fcip_dest_mutex);

	/*
	 * We did not find the destination port information in our
	 * active port list so search for an entry in our routing
	 * table.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);

	/* no entry, or an entry that is neither unavailable nor logged in */
	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
	    (frp && frp->fcipr_pd == NULL)) {
		/*
		 * No entry for the destination port in our routing
		 * table too. First query the transport to see if it
		 * already has structures for the destination port in
		 * its hash tables. This must be done for all topologies
		 * since we could have retired entries in the hash tables
		 * which may have to be re-added without a statechange
		 * callback happening. Its better to try and get an entry
		 * for the destination port rather than simply failing a
		 * request though it may be an overkill in private loop
		 * topologies.
		 * If a entry for the remote port exists in the transport's
		 * hash tables, we are fine and can add the entry to our
		 * routing and dest hash lists, Else for fabric configs we
		 * query the nameserver if one exists or issue FARP ELS.
		 */

		/*
		 * We need to do a PortName based Nameserver
		 * query operation. So get the right PortWWN
		 * for the query.
		 */
		bcopy(pwwn, &twwn, sizeof (la_wwn_t));

		/*
		 * Try IEEE Name (Format 1) first, this is the default and
		 * Emulex uses this format.
		 */
		pd = fc_ulp_get_remote_port(fport->fcipp_handle,

		if (rval != FC_SUCCESS) {
			/*
			 * If IEEE Name (Format 1) query failed, try IEEE
			 * Extended Name (Format 2) which Qlogic uses.
			 * And try port 1 on Qlogic FC-HBA first.
			 * Note: On x86, we need to byte swap the 32-bit
			 * word first, after the modification, swap it back.
			 */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
			twwn.w.naa_id = QLC_PORT_NAA;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,

		if (rval != FC_SUCCESS) {
			/* If still failed, try port 2 on Qlogic FC-HBA. */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,

		if (rval == FC_SUCCESS) {
			/*
			 * Add the newly found destination structure
			 * to our routing table. Create a map with
			 * the device we found. We could ask the
			 * transport to give us the list of all
			 * devices connected to our port but we
			 * probably don't need to know all the devices
			 * so let us just constuct a list with only
			 * one device instead.
			 */
			fc_ulp_copy_portmap(&map, pd);
			fcip_rt_update(fptr, &map, 1);

			mutex_enter(&fptr->fcip_rt_mutex);
			frp = fcip_lookup_rtable(fptr, pwwn,
			mutex_exit(&fptr->fcip_rt_mutex);

			fdestp = fcip_add_dest(fptr, frp);
		} else if (fcip_farp_supported &&
		    (FC_TOP_EXTERNAL(fport->fcipp_topology) ||
		    (fport->fcipp_topology == FC_TOP_PT_PT))) {
			/*
			 * The Name server request failed so
			 * fall back to FARP resolution.
			 */
			fdestp = fcip_do_farp(fptr, pwwn, NULL,

	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		/*
		 * Prepare a dest structure to return to caller
		 */
		fdestp = fcip_add_dest(fptr, frp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "in fcip get dest non fabric"));
/*
 * Endian clean WWN compare.
 * Returns 0 if they compare OK, else return non zero value.
 * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN,
 * FCIP_COMPARE_BROADCAST.
 *
 * Bytes 2..7 (the IEEE MAC portion) are always compared; the NAA/port
 * bytes 0..1 are additionally compared for FCIP_COMPARE_PWWN (the NAA
 * nibble in byte 0 is masked with 0xf0 before comparing).
 *
 * NOTE(review): return type, braces and the actual return statements are
 * missing from this extract; confirm against the complete source.
 */
fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag)
	/* compare the low six bytes first -- common to all compare modes */
	if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) ||
	    (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) ||
	    (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) ||
	    (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) ||
	    (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) ||
	    (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) {
	} else if ((flag == FCIP_COMPARE_PWWN) &&
	    (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) ||
	    (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) {
/*
 * Add an entry for a remote port in the dest hash table. Dest hash table
 * has entries for ports in the routing hash table with which we decide
 * to establish IP communication with. The no. of entries in the dest hash
 * table must always be less than or equal to the entries in the routing
 * hash table. Every entry in the dest hash table ofcourse must have a
 * corresponding entry in the routing hash table.
 *
 * Returns the existing entry if one matches the routing entry's PWWN,
 * otherwise allocates, initializes and inserts a fresh entry at the head
 * of the bucket.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * pwwn/hash_bucket, braces, returns, the NULL-frp guard, and the final
 * return of the new entry); confirm against the complete source.
 */
static struct fcip_dest *
fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp)
	struct fcip_dest	*fdestp = NULL;
	struct fcip_dest	*fdest_new;

	pwwn = &frp->fcipr_pwwn;
	mutex_enter(&fptr->fcip_dest_mutex);
	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	/* return an existing entry rather than adding a duplicate */
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		if (fdestp->fcipd_rtable) {
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_PWWN) == 0) {
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;

	ASSERT(fdestp == NULL);

	/* no match: build a new entry and link it at the bucket head */
	fdest_new = (struct fcip_dest *)
	    kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP);

	mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL);
	fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket];
	fdest_new->fcipd_refcnt = 0;
	fdest_new->fcipd_rtable = frp;
	fdest_new->fcipd_ncmds = 0;
	fptr->fcip_dest[hash_bucket] = fdest_new;
	fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED;

	mutex_exit(&fptr->fcip_dest_mutex);
/*
 * Cleanup the dest hash table and remove all entries.
 *
 * Walks every hash bucket, destroys each entry's mutex and frees it,
 * then clears the bucket head. Runs with fcip_dest_mutex held for the
 * whole sweep.
 *
 * NOTE(review): return type, declaration of i, and some braces are
 * missing from this extract; confirm against the complete source.
 */
fcip_cleanup_dest(struct fcip *fptr)
	struct fcip_dest	*fdestp = NULL;
	struct fcip_dest	*fdest_delp = NULL;

	mutex_enter(&fptr->fcip_dest_mutex);

	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
		fdestp = fptr->fcip_dest[i];
		while (fdestp != NULL) {
			/* save next before freeing the current node */
			mutex_destroy(&fdestp->fcipd_mutex);
			fdest_delp = fdestp;
			fdestp = fdestp->fcipd_next;
			kmem_free(fdest_delp, sizeof (struct fcip_dest));
			fptr->fcip_dest[i] = NULL;

	mutex_exit(&fptr->fcip_dest_mutex);
/*
 * Send FARP requests for Fabric ports when we don't have the port
 * we wish to talk to in our routing hash table. FARP is specially required
 * to talk to FC switches for inband switch management. Most FC switches
 * today have a switch FC IP address for IP over FC inband switch management
 * but the WWN and Port_ID for this traffic is not available through the
 * Nameservers since the switch themeselves are transparent.
 *
 * Broadcasts a FARP ELS asking the port matching pwwn (and optionally
 * ip_addr) to reply and PLOGI into us, then waits up to FCIP_FARP_TIMEOUT
 * for the response callback to populate the routing table; returns the
 * resolved fcip_dest or NULL.
 *
 * NOTE(review): this extract omits some original lines (declarations for
 * rval/farp_lbolt, braces, returns, the fcmd initialization from farp_cmd,
 * the ip_addr conditionals, and break statements); control flow shown
 * below is incomplete -- confirm against the complete source.
 */
static struct fcip_dest *
fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr,
    size_t ip_addr_len, int flags)
	fcip_pkt_t			*fcip_pkt;
	fc_packet_t			*fc_pkt;
	fcip_port_info_t		*fport = fptr->fcip_port_info;
	la_els_farp_t			farp_cmd;
	la_els_farp_t			*fcmd;
	struct fcip_dest		*fdestp = NULL;
	la_wwn_t			broadcast_wwn;
	struct fcip_dest		*bdestp;
	struct fcip_routing_table	*frp;

	/* FARP is carried on the broadcast destination */
	bdestp = fcip_get_dest(fptr, &broadcast_wwn);

	if (bdestp == NULL) {

	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
	    sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP);

	if (fcip_pkt == NULL) {

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);

	mutex_enter(&bdestp->fcipd_mutex);
	/* broadcast entry vanished while we were allocating */
	if (bdestp->fcipd_rtable == NULL) {
		mutex_exit(&bdestp->fcipd_mutex);
		fcip_ipkt_free(fcip_pkt);

	fcip_pkt->fcip_pkt_dest = bdestp;
	fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev;

	bdestp->fcipd_ncmds++;		/* account the outstanding command */
	mutex_exit(&bdestp->fcipd_mutex);

	fcip_init_broadcast_pkt(fcip_pkt, NULL, 1);
	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;

	/*
	 * Now initialize the FARP payload itself
	 */
	fcmd->ls_code.ls_code = LA_ELS_FARP_REQ;
	fcmd->ls_code.mbz = 0;
	/*
	 * for now just match the Port WWN since the other match addr
	 * code points are optional. We can explore matching the IP address
	 * if needed (when ip_addr is supplied).
	 */
	fcmd->match_addr = FARP_MATCH_WW_PN_IPv4;
	/* presumably else arm: no IP address supplied */
	fcmd->match_addr = FARP_MATCH_WW_PN;

	/*
	 * Request the responder port to log into us - that way
	 * the Transport is aware of the remote port when we create
	 * an entry for it in our tables
	 */
	fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI;
	fcmd->req_id = fport->fcipp_sid;
	fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id;
	bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t));
	bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t));
	bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t));
	/*
	 * copy in source IP address if we get to know it
	 */
	bcopy(ip_addr, fcmd->resp_ip, ip_addr_len);

	fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t);
	fc_pkt->pkt_rsplen = sizeof (la_els_farp_t);
	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/* copy the built payload out into the DMA-mapped command buffer */
	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
	    sizeof (la_els_farp_t));

	/*
	 * send the packet in polled mode.
	 */
	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "fcip_transport of farp pkt failed 0x%x", rval));
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
		fcip_ipkt_free(fcip_pkt);

		mutex_enter(&bdestp->fcipd_mutex);
		bdestp->fcipd_ncmds--;
		mutex_exit(&bdestp->fcipd_mutex);

	/* absolute deadline for the FARP response */
	farp_lbolt = ddi_get_lbolt();
	farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT);

	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_farp_rsp_flag = 0;
	while (!fptr->fcip_farp_rsp_flag) {
		if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex,
		    farp_lbolt) == -1) {
			/*
			 * No FARP response from any destination port
			 * within the timeout -- give up.
			 */
			fptr->fcip_farp_rsp_flag = 1;

			/*
			 * We received a FARP response - check to see if the
			 * response was in reply to our FARP request.
			 */
			mutex_enter(&fptr->fcip_rt_mutex);
			frp = fcip_lookup_rtable(fptr, pwwn,
			    FCIP_COMPARE_NWWN);
			mutex_exit(&fptr->fcip_rt_mutex);

			if ((frp != NULL) &&
			    !FCIP_RTE_UNAVAIL(frp->fcipr_state)) {
				fdestp = fcip_get_dest(fptr, pwwn);
				/*
				 * Not our FARP response so go back and wait
				 * again till FARP_TIMEOUT expires
				 */
				fptr->fcip_farp_rsp_flag = 0;
	mutex_exit(&fptr->fcip_mutex);

	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
	fcip_ipkt_free(fcip_pkt);
	mutex_enter(&bdestp->fcipd_mutex);
	bdestp->fcipd_ncmds--;
	mutex_exit(&bdestp->fcipd_mutex);
6218 * Helper routine to PLOGI to a remote port we wish to talk to.
6219 * This may not be required since the port driver does logins anyway,
6220 * but this can be required in fabric cases since FARP requests/responses
6221 * don't require you to be logged in?
6226 fcip_do_plogi(struct fcip
*fptr
, struct fcip_routing_table
*frp
)
6228 fcip_pkt_t
*fcip_pkt
;
6229 fc_packet_t
*fc_pkt
;
6230 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6233 fc_frame_hdr_t
*fr_hdr
;
6236 * Don't bother to login for broadcast RTE entries
6238 if ((frp
->fcipr_d_id
.port_id
== 0x0) ||
6239 (frp
->fcipr_d_id
.port_id
== 0xffffff)) {
6240 return (FC_FAILURE
);
6244 * We shouldn't pound in too many logins here
6247 if (frp
->fcipr_state
== FCIP_RT_LOGIN_PROGRESS
||
6248 frp
->fcipr_state
== PORT_DEVICE_LOGGED_IN
) {
6249 return (FC_SUCCESS
);
6252 fcip_pkt
= fcip_ipkt_alloc(fptr
, sizeof (la_els_logi_t
),
6253 sizeof (la_els_logi_t
), frp
->fcipr_pd
, KM_SLEEP
);
6255 if (fcip_pkt
== NULL
) {
6256 return (FC_FAILURE
);
6260 * Update back pointer for login state update
6262 fcip_pkt
->fcip_pkt_frp
= frp
;
6263 frp
->fcipr_state
= FCIP_RT_LOGIN_PROGRESS
;
6265 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6268 * Initialize frame header for ELS
6270 fr_hdr
= &fc_pkt
->pkt_cmd_fhdr
;
6271 fr_hdr
->r_ctl
= R_CTL_ELS_REQ
;
6272 fr_hdr
->type
= FC_TYPE_EXTENDED_LS
;
6273 fr_hdr
->f_ctl
= F_CTL_SEQ_INITIATIVE
| F_CTL_FIRST_SEQ
;
6275 fr_hdr
->s_id
= fport
->fcipp_sid
.port_id
;
6276 fr_hdr
->d_id
= frp
->fcipr_d_id
.port_id
;
6277 fr_hdr
->seq_cnt
= 0;
6278 fr_hdr
->ox_id
= 0xffff;
6279 fr_hdr
->rx_id
= 0xffff;
6282 fc_pkt
->pkt_rsplen
= sizeof (la_els_logi_t
);
6283 fc_pkt
->pkt_comp
= fcip_ipkt_callback
;
6284 fc_pkt
->pkt_tran_type
= FC_PKT_EXCHANGE
;
6285 fc_pkt
->pkt_timeout
= 10; /* 10 seconds */
6286 fcip_pkt
->fcip_pkt_ttl
= fptr
->fcip_timeout_ticks
+ fc_pkt
->pkt_timeout
;
6287 fc_pkt
->pkt_ulp_private
= (opaque_t
)fcip_pkt
;
6290 * Everybody does class 3, so let's just set it. If the transport
6291 * knows better, it will deal with the class appropriately.
6294 fc_pkt
->pkt_tran_flags
= FC_TRAN_INTR
| FC_TRAN_CLASS3
;
6297 * we need only fill in the ls_code and the cmd frame header
6299 bzero((void *)&logi
, sizeof (la_els_logi_t
));
6300 logi
.ls_code
.ls_code
= LA_ELS_PLOGI
;
6301 logi
.ls_code
.mbz
= 0;
6303 FCIP_CP_OUT((uint8_t *)&logi
, fc_pkt
->pkt_cmd
, fc_pkt
->pkt_cmd_acc
,
6304 sizeof (la_els_logi_t
));
6306 rval
= fc_ulp_login(fport
->fcipp_handle
, &fc_pkt
, 1);
6307 if (rval
!= FC_SUCCESS
) {
6309 "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x",
6310 frp
->fcipr_d_id
.port_id
, rval
);
6311 fcip_ipkt_free(fcip_pkt
);
6317 * The packet callback routine - called from the transport/FCA after
6318 * it is done DMA'ing/sending out the packet contents on the wire so
6319 * that the alloc'ed packet can be freed
6322 fcip_ipkt_callback(fc_packet_t
*fc_pkt
)
6325 ls_code_t logi_resp
;
6326 fcip_pkt_t
*fcip_pkt
;
6327 fc_frame_hdr_t
*fr_hdr
;
6329 fcip_port_info_t
*fport
;
6330 struct fcip_routing_table
*frp
;
6332 fr_hdr
= &fc_pkt
->pkt_cmd_fhdr
;
6334 FCIP_CP_IN(fc_pkt
->pkt_resp
, (uint8_t *)&logi_resp
,
6335 fc_pkt
->pkt_resp_acc
, sizeof (logi_resp
));
6337 FCIP_CP_IN(fc_pkt
->pkt_cmd
, (uint8_t *)&logi_req
, fc_pkt
->pkt_cmd_acc
,
6340 fcip_pkt
= (fcip_pkt_t
*)fc_pkt
->pkt_ulp_private
;
6341 frp
= fcip_pkt
->fcip_pkt_frp
;
6342 fptr
= fcip_pkt
->fcip_pkt_fptr
;
6343 fport
= fptr
->fcip_port_info
;
6345 ASSERT(logi_req
.ls_code
== LA_ELS_PLOGI
);
6347 if (fc_pkt
->pkt_state
!= FC_PKT_SUCCESS
||
6348 logi_resp
.ls_code
!= LA_ELS_ACC
) {
6351 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
, (CE_WARN
,
6352 "opcode : 0x%x to d_id: 0x%x failed",
6353 logi_req
.ls_code
, fr_hdr
->d_id
));
6355 mutex_enter(&fptr
->fcip_rt_mutex
);
6356 frp
->fcipr_state
= PORT_DEVICE_INVALID
;
6357 frp
->fcipr_invalid_timeout
= fptr
->fcip_timeout_ticks
+
6358 (FCIP_RTE_TIMEOUT
/ 2);
6359 mutex_exit(&fptr
->fcip_rt_mutex
);
6363 d_id
.port_id
= fr_hdr
->d_id
;
6364 d_id
.priv_lilp_posit
= 0;
6367 * Update PLOGI results; FCA Handle, and Port device handles
6369 mutex_enter(&fptr
->fcip_rt_mutex
);
6370 frp
->fcipr_pd
= fc_pkt
->pkt_pd
;
6371 frp
->fcipr_fca_dev
=
6372 fc_ulp_get_fca_device(fport
->fcipp_handle
, d_id
);
6373 frp
->fcipr_state
= PORT_DEVICE_LOGGED_IN
;
6374 mutex_exit(&fptr
->fcip_rt_mutex
);
6377 fcip_ipkt_free(fcip_pkt
);
6382 * pkt_alloc routine for outbound IP datagrams. The cache constructor
6383 * Only initializes the pkt_cmd_dma (which is where the outbound datagram
6384 * is stuffed) since we don't expect response
6387 fcip_pkt_alloc(struct fcip
*fptr
, mblk_t
*bp
, int flags
, int datalen
)
6389 fcip_pkt_t
*fcip_pkt
;
6390 fc_packet_t
*fc_pkt
;
6391 ddi_dma_cookie_t pkt_cookie
;
6392 ddi_dma_cookie_t
*cp
;
6394 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6396 fcip_pkt
= kmem_cache_alloc(fptr
->fcip_xmit_cache
, flags
);
6397 if (fcip_pkt
== NULL
) {
6398 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
, (CE_WARN
,
6399 "fcip_pkt_alloc: kmem_cache_alloc failed"));
6403 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6404 fcip_pkt
->fcip_pkt_fcpktp
= fc_pkt
;
6405 fc_pkt
->pkt_tran_flags
= 0;
6406 fcip_pkt
->fcip_pkt_dma_flags
= 0;
6409 * the cache constructor has allocated the dma handle
6411 fc_pkt
->pkt_cmd
= (caddr_t
)bp
->b_rptr
;
6412 if (ddi_dma_addr_bind_handle(fc_pkt
->pkt_cmd_dma
, NULL
,
6413 (caddr_t
)bp
->b_rptr
, datalen
, DDI_DMA_WRITE
| DDI_DMA_CONSISTENT
,
6414 DDI_DMA_DONTWAIT
, NULL
, &pkt_cookie
,
6415 &fc_pkt
->pkt_cmd_cookie_cnt
) != DDI_DMA_MAPPED
) {
6419 fcip_pkt
->fcip_pkt_dma_flags
|= FCIP_CMD_DMA_BOUND
;
6421 if (fc_pkt
->pkt_cmd_cookie_cnt
>
6422 fport
->fcipp_cmd_dma_attr
.dma_attr_sgllen
) {
6426 ASSERT(fc_pkt
->pkt_cmd_cookie_cnt
!= 0);
6428 cp
= fc_pkt
->pkt_cmd_cookie
= (ddi_dma_cookie_t
*)kmem_alloc(
6429 fc_pkt
->pkt_cmd_cookie_cnt
* sizeof (pkt_cookie
),
6438 for (cnt
= 1; cnt
< fc_pkt
->pkt_cmd_cookie_cnt
; cnt
++, cp
++) {
6439 ddi_dma_nextcookie(fc_pkt
->pkt_cmd_dma
, &pkt_cookie
);
6443 fc_pkt
->pkt_cmdlen
= datalen
;
6445 fcip_pkt
->fcip_pkt_mp
= NULL
;
6446 fcip_pkt
->fcip_pkt_wq
= NULL
;
6447 fcip_pkt
->fcip_pkt_dest
= NULL
;
6448 fcip_pkt
->fcip_pkt_next
= NULL
;
6449 fcip_pkt
->fcip_pkt_prev
= NULL
;
6450 fcip_pkt
->fcip_pkt_state
= 0;
6451 fcip_pkt
->fcip_pkt_reason
= 0;
6452 fcip_pkt
->fcip_pkt_flags
= 0;
6453 fcip_pkt
->fcip_pkt_frp
= NULL
;
6458 fcip_pkt_free(fcip_pkt
, 0);
6460 return ((fcip_pkt_t
*)0);
6464 * Free a packet and all its associated resources
6467 fcip_pkt_free(struct fcip_pkt
*fcip_pkt
, int free_mblk
)
6469 fc_packet_t
*fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6470 struct fcip
*fptr
= fcip_pkt
->fcip_pkt_fptr
;
6472 if (fc_pkt
->pkt_cmd_cookie
!= NULL
) {
6473 kmem_free(fc_pkt
->pkt_cmd_cookie
, fc_pkt
->pkt_cmd_cookie_cnt
*
6474 sizeof (ddi_dma_cookie_t
));
6475 fc_pkt
->pkt_cmd_cookie
= NULL
;
6478 fcip_free_pkt_dma(fcip_pkt
);
6479 if (free_mblk
&& fcip_pkt
->fcip_pkt_mp
) {
6480 freemsg(fcip_pkt
->fcip_pkt_mp
);
6481 fcip_pkt
->fcip_pkt_mp
= NULL
;
6484 (void) fc_ulp_uninit_packet(fptr
->fcip_port_info
->fcipp_handle
, fc_pkt
);
6486 kmem_cache_free(fptr
->fcip_xmit_cache
, (void *)fcip_pkt
);
6490 * Allocate a Packet for internal driver use. This is for requests
6491 * that originate from within the driver
6494 fcip_ipkt_alloc(struct fcip
*fptr
, int cmdlen
, int resplen
,
6495 opaque_t pd
, int flags
)
6497 fcip_pkt_t
*fcip_pkt
;
6498 fc_packet_t
*fc_pkt
;
6500 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6502 uint_t held_here
= 0;
6503 ddi_dma_cookie_t pkt_cookie
;
6504 ddi_dma_cookie_t
*cp
;
6507 cb
= (flags
== KM_SLEEP
) ? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
;
6509 fcip_pkt
= kmem_zalloc((sizeof (fcip_pkt_t
) +
6510 fport
->fcipp_fca_pkt_size
), flags
);
6512 if (fcip_pkt
== NULL
) {
6513 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
,
6514 (CE_WARN
, "pkt alloc of ineternal pkt failed"));
6518 fcip_pkt
->fcip_pkt_flags
= FCIP_PKT_INTERNAL
;
6519 fcip_pkt
->fcip_pkt_fptr
= fptr
;
6520 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6521 fcip_pkt
->fcip_pkt_fcpktp
= fc_pkt
;
6522 fc_pkt
->pkt_tran_flags
= 0;
6523 fc_pkt
->pkt_cmdlen
= 0;
6524 fc_pkt
->pkt_rsplen
= 0;
6525 fc_pkt
->pkt_datalen
= 0;
6526 fc_pkt
->pkt_fca_private
= (opaque_t
)((caddr_t
)fcip_pkt
+
6527 sizeof (fcip_pkt_t
));
6528 fc_pkt
->pkt_ulp_private
= (opaque_t
)fcip_pkt
;
6531 if (ddi_dma_alloc_handle(fptr
->fcip_dip
,
6532 &fport
->fcipp_cmd_dma_attr
, cb
, NULL
,
6533 &fc_pkt
->pkt_cmd_dma
) != DDI_SUCCESS
) {
6537 if (ddi_dma_mem_alloc(fc_pkt
->pkt_cmd_dma
, cmdlen
,
6538 &fport
->fcipp_fca_acc_attr
, DDI_DMA_CONSISTENT
,
6539 cb
, NULL
, (caddr_t
*)&fc_pkt
->pkt_cmd
,
6540 &real_len
, &fc_pkt
->pkt_cmd_acc
) != DDI_SUCCESS
) {
6544 fcip_pkt
->fcip_pkt_dma_flags
|= FCIP_CMD_DMA_MEM
;
6545 fc_pkt
->pkt_cmdlen
= cmdlen
;
6547 if (real_len
< cmdlen
) {
6551 if (ddi_dma_addr_bind_handle(fc_pkt
->pkt_cmd_dma
, NULL
,
6552 (caddr_t
)fc_pkt
->pkt_cmd
, real_len
,
6553 DDI_DMA_WRITE
| DDI_DMA_CONSISTENT
, cb
, NULL
,
6554 &pkt_cookie
, &fc_pkt
->pkt_cmd_cookie_cnt
) !=
6559 fcip_pkt
->fcip_pkt_dma_flags
|= FCIP_CMD_DMA_BOUND
;
6561 if (fc_pkt
->pkt_cmd_cookie_cnt
>
6562 fport
->fcipp_cmd_dma_attr
.dma_attr_sgllen
) {
6566 ASSERT(fc_pkt
->pkt_cmd_cookie_cnt
!= 0);
6568 cp
= fc_pkt
->pkt_cmd_cookie
= (ddi_dma_cookie_t
*)kmem_alloc(
6569 fc_pkt
->pkt_cmd_cookie_cnt
* sizeof (pkt_cookie
),
6578 for (cnt
= 1; cnt
< fc_pkt
->pkt_cmd_cookie_cnt
; cnt
++, cp
++) {
6579 ddi_dma_nextcookie(fc_pkt
->pkt_cmd_dma
, &pkt_cookie
);
6585 if (ddi_dma_alloc_handle(fptr
->fcip_dip
,
6586 &fport
->fcipp_resp_dma_attr
, cb
, NULL
,
6587 &fc_pkt
->pkt_resp_dma
) != DDI_SUCCESS
) {
6591 if (ddi_dma_mem_alloc(fc_pkt
->pkt_resp_dma
, resplen
,
6592 &fport
->fcipp_fca_acc_attr
, DDI_DMA_CONSISTENT
,
6593 cb
, NULL
, (caddr_t
*)&fc_pkt
->pkt_resp
,
6594 &real_len
, &fc_pkt
->pkt_resp_acc
) != DDI_SUCCESS
) {
6598 fcip_pkt
->fcip_pkt_dma_flags
|= FCIP_RESP_DMA_MEM
;
6600 if (real_len
< resplen
) {
6604 if (ddi_dma_addr_bind_handle(fc_pkt
->pkt_resp_dma
, NULL
,
6605 (caddr_t
)fc_pkt
->pkt_resp
, real_len
,
6606 DDI_DMA_WRITE
| DDI_DMA_CONSISTENT
, cb
, NULL
,
6607 &pkt_cookie
, &fc_pkt
->pkt_resp_cookie_cnt
) !=
6612 fcip_pkt
->fcip_pkt_dma_flags
|= FCIP_RESP_DMA_BOUND
;
6613 fc_pkt
->pkt_rsplen
= resplen
;
6615 if (fc_pkt
->pkt_resp_cookie_cnt
>
6616 fport
->fcipp_resp_dma_attr
.dma_attr_sgllen
) {
6620 ASSERT(fc_pkt
->pkt_resp_cookie_cnt
!= 0);
6622 cp
= fc_pkt
->pkt_resp_cookie
= (ddi_dma_cookie_t
*)kmem_alloc(
6623 fc_pkt
->pkt_resp_cookie_cnt
* sizeof (pkt_cookie
),
6632 for (cnt
= 1; cnt
< fc_pkt
->pkt_resp_cookie_cnt
; cnt
++, cp
++) {
6633 ddi_dma_nextcookie(fc_pkt
->pkt_resp_dma
, &pkt_cookie
);
6639 * Initialize pkt_pd prior to calling fc_ulp_init_packet
6642 fc_pkt
->pkt_pd
= pd
;
6645 * Ask the FCA to bless the internal packet
6647 if (fc_ulp_init_packet((opaque_t
)fport
->fcipp_handle
,
6648 fc_pkt
, flags
) != FC_SUCCESS
) {
6653 * Keep track of # of ipkts alloc-ed
6654 * This function can get called with mutex either held or not. So, we'll
6655 * grab mutex if it is not already held by this thread.
6656 * This has to be cleaned up someday.
6658 if (!MUTEX_HELD(&fptr
->fcip_mutex
)) {
6660 mutex_enter(&fptr
->fcip_mutex
);
6663 fptr
->fcip_num_ipkts_pending
++;
6666 mutex_exit(&fptr
->fcip_mutex
);
6671 fcip_ipkt_free(fcip_pkt
);
6678 * free up an internal IP packet (like a FARP pkt etc)
6681 fcip_ipkt_free(fcip_pkt_t
*fcip_pkt
)
6683 fc_packet_t
*fc_pkt
;
6684 struct fcip
*fptr
= fcip_pkt
->fcip_pkt_fptr
;
6685 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6687 ASSERT(fptr
!= NULL
);
6688 ASSERT(!mutex_owned(&fptr
->fcip_mutex
));
6690 /* One less ipkt to wait for */
6691 mutex_enter(&fptr
->fcip_mutex
);
6692 if (fptr
->fcip_num_ipkts_pending
) /* Safety check */
6693 fptr
->fcip_num_ipkts_pending
--;
6694 mutex_exit(&fptr
->fcip_mutex
);
6696 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6698 if (fc_pkt
->pkt_cmd_cookie
!= NULL
) {
6699 kmem_free(fc_pkt
->pkt_cmd_cookie
, fc_pkt
->pkt_cmd_cookie_cnt
*
6700 sizeof (ddi_dma_cookie_t
));
6701 fc_pkt
->pkt_cmd_cookie
= NULL
;
6704 if (fc_pkt
->pkt_resp_cookie
!= NULL
) {
6705 kmem_free(fc_pkt
->pkt_resp_cookie
, fc_pkt
->pkt_resp_cookie_cnt
*
6706 sizeof (ddi_dma_cookie_t
));
6707 fc_pkt
->pkt_resp_cookie
= NULL
;
6710 if (fc_ulp_uninit_packet(fport
->fcipp_handle
, fc_pkt
) != FC_SUCCESS
) {
6711 FCIP_DEBUG(FCIP_DEBUG_ELS
, (CE_WARN
,
6712 "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p",
6715 fcip_free_pkt_dma(fcip_pkt
);
6716 kmem_free(fcip_pkt
, (sizeof (fcip_pkt_t
) + fport
->fcipp_fca_pkt_size
));
6720 * initialize a unicast request. This is a misnomer because even the
6721 * broadcast requests are initialized with this routine
6724 fcip_init_unicast_pkt(fcip_pkt_t
*fcip_pkt
, fc_portid_t sid
, fc_portid_t did
,
6727 fc_packet_t
*fc_pkt
;
6728 fc_frame_hdr_t
*fr_hdr
;
6729 struct fcip
*fptr
= fcip_pkt
->fcip_pkt_fptr
;
6731 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6732 fr_hdr
= &fc_pkt
->pkt_cmd_fhdr
;
6734 fr_hdr
->r_ctl
= R_CTL_DEVICE_DATA
| R_CTL_UNSOL_DATA
;
6735 fr_hdr
->s_id
= sid
.port_id
;
6736 fr_hdr
->d_id
= did
.port_id
;
6737 fr_hdr
->type
= FC_TYPE_IS8802_SNAP
;
6738 fr_hdr
->f_ctl
= F_CTL_FIRST_SEQ
| F_CTL_LAST_SEQ
;
6739 fr_hdr
->df_ctl
= DF_CTL_NET_HDR
;
6740 fr_hdr
->seq_cnt
= 0;
6741 fr_hdr
->ox_id
= 0xffff;
6742 fr_hdr
->rx_id
= 0xffff;
6745 * reset all the length fields
6747 fc_pkt
->pkt_rsplen
= 0;
6748 fc_pkt
->pkt_datalen
= 0;
6749 fc_pkt
->pkt_comp
= comp
;
6751 fc_pkt
->pkt_tran_flags
|= FC_TRAN_INTR
;
6753 fc_pkt
->pkt_tran_flags
|= FC_TRAN_NO_INTR
;
6755 fc_pkt
->pkt_tran_type
= FC_PKT_OUTBOUND
| FC_PKT_IP_WRITE
;
6756 fc_pkt
->pkt_timeout
= fcip_pkt_ttl_ticks
;
6757 fcip_pkt
->fcip_pkt_ttl
= fptr
->fcip_timeout_ticks
+ fc_pkt
->pkt_timeout
;
6762 * Initialize a fcip_packet for broadcast data transfers
6765 fcip_init_broadcast_pkt(fcip_pkt_t
*fcip_pkt
, void (*comp
) (), int is_els
)
6767 fc_packet_t
*fc_pkt
;
6768 fc_frame_hdr_t
*fr_hdr
;
6769 struct fcip
*fptr
= fcip_pkt
->fcip_pkt_fptr
;
6770 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6774 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6775 fr_hdr
= &fc_pkt
->pkt_cmd_fhdr
;
6776 sid
= fport
->fcipp_sid
.port_id
;
6779 fr_hdr
->r_ctl
= R_CTL_ELS_REQ
;
6781 fr_hdr
->r_ctl
= R_CTL_DEVICE_DATA
| R_CTL_UNSOL_DATA
;
6785 * The destination broadcast address depends on the topology
6786 * of the underlying port
6788 did
= fptr
->fcip_broadcast_did
;
6790 * mark pkt a broadcast pkt
6792 fc_pkt
->pkt_tran_type
= FC_PKT_BROADCAST
;
6795 fr_hdr
->type
= FC_TYPE_IS8802_SNAP
;
6796 fr_hdr
->f_ctl
= F_CTL_FIRST_SEQ
| F_CTL_LAST_SEQ
| F_CTL_END_SEQ
;
6797 fr_hdr
->f_ctl
&= ~(F_CTL_SEQ_INITIATIVE
);
6798 fr_hdr
->df_ctl
= DF_CTL_NET_HDR
;
6799 fr_hdr
->seq_cnt
= 0;
6800 fr_hdr
->ox_id
= 0xffff;
6801 fr_hdr
->rx_id
= 0xffff;
6803 fc_pkt
->pkt_comp
= comp
;
6806 fc_pkt
->pkt_tran_flags
|= FC_TRAN_INTR
;
6808 fc_pkt
->pkt_tran_flags
|= FC_TRAN_NO_INTR
;
6811 fc_pkt
->pkt_tran_type
= FC_PKT_BROADCAST
;
6812 fc_pkt
->pkt_timeout
= fcip_pkt_ttl_ticks
;
6813 fcip_pkt
->fcip_pkt_ttl
= fptr
->fcip_timeout_ticks
+ fc_pkt
->pkt_timeout
;
6819 * Free up all DMA resources associated with an allocated packet
6822 fcip_free_pkt_dma(fcip_pkt_t
*fcip_pkt
)
6824 fc_packet_t
*fc_pkt
;
6826 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
6828 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
,
6829 (CE_NOTE
, "in freepktdma : flags 0x%x",
6830 fcip_pkt
->fcip_pkt_dma_flags
));
6832 if (fcip_pkt
->fcip_pkt_dma_flags
& FCIP_CMD_DMA_BOUND
) {
6833 (void) ddi_dma_unbind_handle(fc_pkt
->pkt_cmd_dma
);
6835 if (fcip_pkt
->fcip_pkt_dma_flags
& FCIP_CMD_DMA_MEM
) {
6836 ddi_dma_mem_free(&fc_pkt
->pkt_cmd_acc
);
6839 if (fcip_pkt
->fcip_pkt_dma_flags
& FCIP_RESP_DMA_BOUND
) {
6840 (void) ddi_dma_unbind_handle(fc_pkt
->pkt_resp_dma
);
6842 if (fcip_pkt
->fcip_pkt_dma_flags
& FCIP_RESP_DMA_MEM
) {
6843 ddi_dma_mem_free(&fc_pkt
->pkt_resp_acc
);
6846 * for internal commands, we need to free up the dma handles too.
6847 * This is done in the cache destructor for non internal cmds
6849 if (fcip_pkt
->fcip_pkt_flags
& FCIP_PKT_INTERNAL
) {
6850 if (fc_pkt
->pkt_cmd_dma
) {
6851 ddi_dma_free_handle(&fc_pkt
->pkt_cmd_dma
);
6853 if (fc_pkt
->pkt_resp_dma
) {
6854 ddi_dma_free_handle(&fc_pkt
->pkt_resp_dma
);
6861 * helper routine to generate a string, given an ether addr
6864 fcip_ether_to_str(struct ether_addr
*e
, caddr_t s
)
6868 for (i
= 0; i
< sizeof (struct ether_addr
); i
++, s
+= 2) {
6869 FCIP_DEBUG(FCIP_DEBUG_MISC
,
6870 (CE_CONT
, "0x%02X:", e
->ether_addr_octet
[i
]));
6871 (void) sprintf(s
, "%02X", e
->ether_addr_octet
[i
]);
6878 * When a broadcast request comes from the upper streams modules, it
6879 * is ugly to look into every datagram to figure out if it is a broadcast
6880 * datagram or a unicast packet. Instead just add the broadcast entries
6881 * into our routing and dest tables and the standard hash table look ups
6882 * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig
6883 * seems to be very ethernet specific and it requires broadcasts to the
6884 * ether broadcast addr of 0xffffffffff to succeed even though we specified
6885 * in the dl_info request that our broadcast MAC addr is 0x0000000000
6886 * (can't figure out why RFC2625 did this though). So add broadcast entries
6887 * for both MAC address
6890 fcip_dest_add_broadcast_entry(struct fcip
*fptr
, int new_flag
)
6893 struct fcip_routing_table
*frp
;
6895 la_wwn_t broadcast_wwn
;
6898 * get port_id of destination for broadcast - this is topology
6901 did
= fptr
->fcip_broadcast_did
;
6903 ether_to_wwn(&fcip_arpbroadcast_addr
, &broadcast_wwn
);
6904 bcopy((void *)&broadcast_wwn
, (void *)&map
.map_pwwn
, sizeof (la_wwn_t
));
6905 bcopy((void *)&broadcast_wwn
, (void *)&map
.map_nwwn
, sizeof (la_wwn_t
));
6907 map
.map_did
.port_id
= did
;
6908 map
.map_hard_addr
.hard_addr
= did
;
6909 map
.map_state
= PORT_DEVICE_VALID
;
6911 map
.map_type
= PORT_DEVICE_NEW
;
6913 map
.map_type
= PORT_DEVICE_CHANGED
;
6917 bzero(&map
.map_fc4_types
, sizeof (map
.map_fc4_types
));
6918 fcip_rt_update(fptr
, &map
, 1);
6919 mutex_enter(&fptr
->fcip_rt_mutex
);
6920 frp
= fcip_lookup_rtable(fptr
, &broadcast_wwn
, FCIP_COMPARE_NWWN
);
6921 mutex_exit(&fptr
->fcip_rt_mutex
);
6923 return (FC_FAILURE
);
6925 (void) fcip_add_dest(fptr
, frp
);
6927 * The Upper IP layers expect the traditional broadcast MAC addr
6928 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
6929 * stream through the /etc/hostname.fcipXX file. Instead of checking
6930 * each phys addr for a match with fcip's ARP header broadcast
6931 * addr (0x00 00 00 00 00 00), its simply easier to add another
6932 * broadcast entry for 0xff ff ff ff ff ff.
6934 ether_to_wwn(&fcipnhbroadcastaddr
, &broadcast_wwn
);
6935 bcopy((void *)&broadcast_wwn
, (void *)&map
.map_pwwn
, sizeof (la_wwn_t
));
6936 bcopy((void *)&broadcast_wwn
, (void *)&map
.map_nwwn
, sizeof (la_wwn_t
));
6937 fcip_rt_update(fptr
, &map
, 1);
6938 mutex_enter(&fptr
->fcip_rt_mutex
);
6939 frp
= fcip_lookup_rtable(fptr
, &broadcast_wwn
, FCIP_COMPARE_NWWN
);
6940 mutex_exit(&fptr
->fcip_rt_mutex
);
6942 return (FC_FAILURE
);
6944 (void) fcip_add_dest(fptr
, frp
);
6945 return (FC_SUCCESS
);
6949 * We need to obtain the D_ID of the broadcast port for transmitting all
6950 * our broadcast (and multicast) requests. The broadcast D_ID as we know
6951 * is dependent on the link topology
6954 fcip_get_broadcast_did(struct fcip
*fptr
)
6956 fcip_port_info_t
*fport
= fptr
->fcip_port_info
;
6960 sid
= fport
->fcipp_sid
.port_id
;
6962 switch (fport
->fcipp_topology
) {
6964 case FC_TOP_PT_PT
: {
6965 fc_portmap_t
*port_map
= NULL
;
6966 uint32_t listlen
= 0;
6968 if (fc_ulp_getportmap(fport
->fcipp_handle
, &port_map
,
6969 &listlen
, FC_ULP_PLOGI_DONTCARE
) == FC_SUCCESS
) {
6970 FCIP_DEBUG(FCIP_DEBUG_INIT
, (CE_NOTE
,
6971 "fcip_gpmap: listlen : 0x%x", listlen
));
6973 did
= port_map
->map_did
.port_id
;
6977 kmem_free(port_map
, listlen
* sizeof (fc_portmap_t
));
6980 /* Dummy return value */
6981 return (0x00FFFFFF);
6990 * The broadcast address is the same whether or not
6991 * the switch/fabric contains a Name service.
6996 case FC_TOP_PUBLIC_LOOP
:
6998 * The open replicate primitive must not be used. The
6999 * broadcast sequence is simply sent to ALPA 0x00. The
7000 * fabric controller then propagates the broadcast to all
7001 * other ports. The fabric propagates the broadcast by
7002 * using the OPNfr primitive.
7007 case FC_TOP_PRIVATE_LOOP
:
7009 * The source port for broadcast in private loop mode
7010 * must send an OPN(fr) signal forcing all ports in the
7011 * loop to replicate the frames that they receive.
7016 case FC_TOP_UNKNOWN
:
7020 FCIP_DEBUG(FCIP_DEBUG_INIT
, (CE_WARN
,
7021 "fcip(0x%x):unknown topology in init_broadcast_pkt",
7022 fptr
->fcip_instance
));
7031 * fcip timeout performs 2 operations:
7032 * 1. timeout any packets sent to the FCA for which a callback hasn't
7033 * happened. If you are wondering why we need a callback since all
7034 * traffic in FCIP is unidirectional, hence all exchanges are unidirectional
7035 * but wait, we can only free up the resources after we know the FCA has
7036 * DMA'ed out the data. pretty obvious eh :)
7038 * 2. Retire and routing table entries we marked up for retiring. This is
7039 * to give the link a chance to recover instead of marking a port down
7040 * when we have lost all communication with it after a link transition
7043 fcip_timeout(void *arg
)
7045 struct fcip
*fptr
= (struct fcip
*)arg
;
7047 fcip_pkt_t
*fcip_pkt
;
7048 struct fcip_dest
*fdestp
;
7050 struct fcip_routing_table
*frtp
;
7051 int dispatch_rte_removal
= 0;
7053 mutex_enter(&fptr
->fcip_mutex
);
7055 fptr
->fcip_flags
|= FCIP_IN_TIMEOUT
;
7056 fptr
->fcip_timeout_ticks
+= fcip_tick_incr
;
7058 if (fptr
->fcip_flags
& (FCIP_DETACHED
| FCIP_DETACHING
| \
7059 FCIP_SUSPENDED
| FCIP_POWER_DOWN
)) {
7060 fptr
->fcip_flags
&= ~(FCIP_IN_TIMEOUT
);
7061 mutex_exit(&fptr
->fcip_mutex
);
7065 if (fptr
->fcip_port_state
== FCIP_PORT_OFFLINE
) {
7066 if (fptr
->fcip_timeout_ticks
> fptr
->fcip_mark_offline
) {
7067 fptr
->fcip_flags
|= FCIP_LINK_DOWN
;
7070 if (!fptr
->fcip_flags
& FCIP_RTE_REMOVING
) {
7071 dispatch_rte_removal
= 1;
7073 mutex_exit(&fptr
->fcip_mutex
);
7076 * Check if we have any Invalid routing table entries in our
7077 * hashtable we have marked off for deferred removal. If any,
7078 * we can spawn a taskq thread to do the cleanup for us. We
7079 * need to avoid cleanup in the timeout thread since we may
7080 * have to wait for outstanding commands to complete before
7081 * we retire a routing table entry. Also dispatch the taskq
7082 * thread only if we are already do not have a taskq thread
7085 if (dispatch_rte_removal
) {
7086 mutex_enter(&fptr
->fcip_rt_mutex
);
7087 for (index
= 0; index
< FCIP_RT_HASH_ELEMS
; index
++) {
7088 frtp
= fptr
->fcip_rtable
[index
];
7090 if ((frtp
->fcipr_state
== FCIP_RT_INVALID
) &&
7091 (fptr
->fcip_timeout_ticks
>
7092 frtp
->fcipr_invalid_timeout
)) {
7094 * If we cannot schedule a task thread
7095 * let us attempt again on the next
7096 * tick rather than call
7097 * fcip_rte_remove_deferred() from here
7098 * directly since the routine can sleep.
7100 frtp
->fcipr_state
= FCIP_RT_RETIRED
;
7102 mutex_enter(&fptr
->fcip_mutex
);
7103 fptr
->fcip_flags
|= FCIP_RTE_REMOVING
;
7104 mutex_exit(&fptr
->fcip_mutex
);
7106 if (taskq_dispatch(fptr
->fcip_tq
,
7107 fcip_rte_remove_deferred
, fptr
,
7110 * failed - so mark the entry
7116 mutex_enter(&fptr
->fcip_mutex
);
7119 mutex_exit(&fptr
->fcip_mutex
);
7122 frtp
= frtp
->fcipr_next
;
7125 mutex_exit(&fptr
->fcip_rt_mutex
);
7128 mutex_enter(&fptr
->fcip_dest_mutex
);
7131 * Now timeout any packets stuck with the transport/FCA for too long
7133 for (i
= 0; i
< FCIP_DEST_HASH_ELEMS
; i
++) {
7134 fdestp
= fptr
->fcip_dest
[i
];
7135 while (fdestp
!= NULL
) {
7136 mutex_enter(&fdestp
->fcipd_mutex
);
7137 for (fcip_pkt
= fdestp
->fcipd_head
; fcip_pkt
!= NULL
;
7138 fcip_pkt
= fcip_pkt
->fcip_pkt_next
) {
7139 if (fcip_pkt
->fcip_pkt_flags
&
7140 (FCIP_PKT_RETURNED
| FCIP_PKT_IN_TIMEOUT
|
7141 FCIP_PKT_IN_ABORT
)) {
7144 if (fptr
->fcip_timeout_ticks
>
7145 fcip_pkt
->fcip_pkt_ttl
) {
7146 fcip_pkt
->fcip_pkt_flags
|=
7147 FCIP_PKT_IN_TIMEOUT
;
7149 mutex_exit(&fdestp
->fcipd_mutex
);
7150 if (taskq_dispatch(fptr
->fcip_tq
,
7151 fcip_pkt_timeout
, fcip_pkt
,
7154 * timeout immediately
7156 fcip_pkt_timeout(fcip_pkt
);
7158 mutex_enter(&fdestp
->fcipd_mutex
);
7160 * The linked list is altered because
7161 * of one of the following reasons:
7162 * a. Timeout code dequeued a pkt
7163 * b. Pkt completion happened
7165 * So restart the spin starting at
7166 * the head again; This is a bit
7167 * excessive, but okay since
7168 * fcip_timeout_ticks isn't incremented
7169 * for this spin, we will skip the
7170 * not-to-be-timedout packets quickly
7172 fcip_pkt
= fdestp
->fcipd_head
;
7173 if (fcip_pkt
== NULL
) {
7178 mutex_exit(&fdestp
->fcipd_mutex
);
7179 fdestp
= fdestp
->fcipd_next
;
7182 mutex_exit(&fptr
->fcip_dest_mutex
);
7185 * reschedule the timeout thread
7187 mutex_enter(&fptr
->fcip_mutex
);
7189 fptr
->fcip_timeout_id
= timeout(fcip_timeout
, fptr
,
7190 drv_usectohz(1000000));
7191 fptr
->fcip_flags
&= ~(FCIP_IN_TIMEOUT
);
7192 mutex_exit(&fptr
->fcip_mutex
);
7197 * This routine is either called from taskq or directly from fcip_timeout
7198 * does the actual job of aborting the packet
7201 fcip_pkt_timeout(void *arg
)
7203 fcip_pkt_t
*fcip_pkt
= (fcip_pkt_t
*)arg
;
7204 struct fcip_dest
*fdestp
;
7206 fc_packet_t
*fc_pkt
;
7207 fcip_port_info_t
*fport
;
7210 fdestp
= fcip_pkt
->fcip_pkt_dest
;
7211 fptr
= fcip_pkt
->fcip_pkt_fptr
;
7212 fport
= fptr
->fcip_port_info
;
7213 fc_pkt
= FCIP_PKT_TO_FC_PKT(fcip_pkt
);
7216 * try to abort the pkt
7218 fcip_pkt
->fcip_pkt_flags
|= FCIP_PKT_IN_ABORT
;
7219 rval
= fc_ulp_abort(fport
->fcipp_handle
, fc_pkt
, KM_NOSLEEP
);
7221 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM
,
7222 (CE_NOTE
, "fc_ulp_abort returns: 0x%x", rval
));
7224 if (rval
== FC_SUCCESS
) {
7225 ASSERT(fdestp
!= NULL
);
7228 * dequeue the pkt from the dest structure pkt list
7230 fcip_pkt
->fcip_pkt_flags
&= ~FCIP_PKT_IN_ABORT
;
7231 mutex_enter(&fdestp
->fcipd_mutex
);
7232 rval
= fcip_fdestp_dequeue_pkt(fdestp
, fcip_pkt
);
7234 mutex_exit(&fdestp
->fcipd_mutex
);
7237 * Now cleanup the pkt and free the mblk
7239 fcip_pkt_free(fcip_pkt
, 1);
7242 * abort failed - just mark the pkt as done and
7243 * wait for it to complete in fcip_pkt_callback since
7244 * the pkt has already been xmitted by the FCA
7246 fcip_pkt
->fcip_pkt_flags
&= ~FCIP_PKT_IN_TIMEOUT
;
7247 if (fcip_pkt
->fcip_pkt_flags
& FCIP_PKT_RETURNED
) {
7248 fcip_pkt
->fcip_pkt_flags
&= ~FCIP_PKT_IN_ABORT
;
7249 mutex_enter(&fdestp
->fcipd_mutex
);
7250 rval
= fcip_fdestp_dequeue_pkt(fdestp
, fcip_pkt
);
7252 mutex_exit(&fdestp
->fcipd_mutex
);
7254 fcip_pkt_free(fcip_pkt
, 1);
7262 * Remove a routing table entry marked for deferred removal. This routine
7263 * unlike fcip_pkt_timeout, is always called from a taskq context
7266 fcip_rte_remove_deferred(void *arg
)
7268 struct fcip
*fptr
= (struct fcip
*)arg
;
7270 struct fcip_dest
*fdestp
;
7273 struct fcip_routing_table
*frtp
, *frtp_next
, *frtp_prev
;
7276 mutex_enter(&fptr
->fcip_rt_mutex
);
7277 for (index
= 0; index
< FCIP_RT_HASH_ELEMS
; index
++) {
7278 frtp
= fptr
->fcip_rtable
[index
];
7281 frtp_next
= frtp
->fcipr_next
;
7283 if (frtp
->fcipr_state
== FCIP_RT_RETIRED
) {
7285 pwwn
= &frtp
->fcipr_pwwn
;
7287 * Get hold of destination pointer
7289 mutex_enter(&fptr
->fcip_dest_mutex
);
7291 hash_bucket
= FCIP_DEST_HASH(pwwn
->raw_wwn
);
7292 ASSERT(hash_bucket
< FCIP_DEST_HASH_ELEMS
);
7294 fdestp
= fptr
->fcip_dest
[hash_bucket
];
7295 while (fdestp
!= NULL
) {
7296 mutex_enter(&fdestp
->fcipd_mutex
);
7297 if (fdestp
->fcipd_rtable
) {
7298 if (fcip_wwn_compare(pwwn
,
7299 &fdestp
->fcipd_pwwn
,
7300 FCIP_COMPARE_PWWN
) == 0) {
7302 &fdestp
->fcipd_mutex
);
7306 mutex_exit(&fdestp
->fcipd_mutex
);
7307 fdestp
= fdestp
->fcipd_next
;
7310 mutex_exit(&fptr
->fcip_dest_mutex
);
7311 if (fdestp
== NULL
) {
7317 mutex_enter(&fdestp
->fcipd_mutex
);
7318 if (fdestp
->fcipd_ncmds
) {
7320 * Instead of waiting to drain commands
7321 * let us revisit this RT entry in
7324 mutex_exit(&fdestp
->fcipd_mutex
);
7331 * We are clean, so remove the RTE
7333 fdestp
->fcipd_rtable
= NULL
;
7334 mutex_exit(&fdestp
->fcipd_mutex
);
7336 if (frtp_prev
== NULL
) {
7338 fptr
->fcip_rtable
[index
] =
7341 frtp_prev
->fcipr_next
=
7345 sizeof (struct fcip_routing_table
));
7354 mutex_exit(&fptr
->fcip_rt_mutex
);
7356 * Clear the RTE_REMOVING flag
7358 mutex_enter(&fptr
->fcip_mutex
);
7359 fptr
->fcip_flags
&= ~FCIP_RTE_REMOVING
;
7360 mutex_exit(&fptr
->fcip_mutex
);
7364 * Walk through all the dest hash table entries and count up the total
7365 * no. of packets outstanding against a given port
7368 fcip_port_get_num_pkts(struct fcip
*fptr
)
7372 struct fcip_dest
*fdestp
;
7374 ASSERT(mutex_owned(&fptr
->fcip_dest_mutex
));
7376 for (i
= 0; i
< FCIP_DEST_HASH_ELEMS
; i
++) {
7377 fdestp
= fptr
->fcip_dest
[i
];
7378 while (fdestp
!= NULL
) {
7379 mutex_enter(&fdestp
->fcipd_mutex
);
7381 ASSERT(fdestp
->fcipd_ncmds
>= 0);
7383 if (fdestp
->fcipd_ncmds
> 0) {
7384 num_cmds
+= fdestp
->fcipd_ncmds
;
7386 mutex_exit(&fdestp
->fcipd_mutex
);
7387 fdestp
= fdestp
->fcipd_next
;
7396 * Walk through the routing table for this state instance and see if there is a
7397 * PLOGI in progress for any of the entries. Return success even if we find one.
7400 fcip_plogi_in_progress(struct fcip
*fptr
)
7403 struct fcip_routing_table
*frp
;
7405 ASSERT(mutex_owned(&fptr
->fcip_rt_mutex
));
7407 for (i
= 0; i
< FCIP_RT_HASH_ELEMS
; i
++) {
7408 frp
= fptr
->fcip_rtable
[i
];
7410 if (frp
->fcipr_state
== FCIP_RT_LOGIN_PROGRESS
) {
7411 /* Found an entry where PLOGI is in progress */
7414 frp
= frp
->fcipr_next
;
7422 * Walk through the fcip port global list and check if the given port exists in
7423 * the list. Returns "0" if port exists and "1" if otherwise.
7426 fcip_check_port_exists(struct fcip
*fptr
)
7428 fcip_port_info_t
*cur_fport
;
7429 fcip_port_info_t
*fport
;
7431 mutex_enter(&fcip_global_mutex
);
7432 fport
= fptr
->fcip_port_info
;
7433 cur_fport
= fcip_port_head
;
7434 while (cur_fport
!= NULL
) {
7435 if (cur_fport
== fport
) {
7437 mutex_exit(&fcip_global_mutex
);
7440 cur_fport
= cur_fport
->fcipp_next
;
7443 mutex_exit(&fcip_global_mutex
);
7449 * Constructor to initialize the sendup elements for callback into
7455 fcip_sendup_constructor(void *buf
, void *arg
, int flags
)
7457 struct fcip_sendup_elem
*msg_elem
= (struct fcip_sendup_elem
*)buf
;
7458 fcip_port_info_t
*fport
= (fcip_port_info_t
*)arg
;
7460 ASSERT(fport
!= NULL
);
7462 msg_elem
->fcipsu_mp
= NULL
;
7463 msg_elem
->fcipsu_func
= NULL
;
7464 msg_elem
->fcipsu_next
= NULL
;
7466 return (FCIP_SUCCESS
);