4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29 #include <sys/sunddi.h>
30 #include <sys/cpuvar.h>
33 #include <sys/socket.h>
34 #include <sys/strsubr.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysmacros.h>
38 #include <sys/idm/idm.h>
39 #include <sys/idm/idm_so.h>
42 extern idm_transport_t idm_transport_list
[];
48 static int iscsi_crc32_hd
= -1;
51 idm_pdu_rx(idm_conn_t
*ic
, idm_pdu_t
*pdu
)
53 iscsi_async_evt_hdr_t
*async_evt
;
56 * If we are in full-featured mode then route SCSI-related
57 * commands to the appropriate function vector
59 ic
->ic_timestamp
= ddi_get_lbolt();
60 mutex_enter(&ic
->ic_state_mutex
);
61 if (ic
->ic_ffp
&& ic
->ic_pdu_events
== 0) {
62 mutex_exit(&ic
->ic_state_mutex
);
64 if (idm_pdu_rx_forward_ffp(ic
, pdu
) == B_TRUE
) {
65 /* Forwarded SCSI-related commands */
68 mutex_enter(&ic
->ic_state_mutex
);
72 * If we get here with a SCSI-related PDU then we are not in
73 * full-feature mode and the PDU is a protocol error (SCSI command
74 * PDU's may sometimes be an exception, see below). All
75 * non-SCSI PDU's get treated them the same regardless of whether
76 * we are in full-feature mode.
78 * Look at the opcode and in some cases the PDU status and
79 * determine the appropriate event to send to the connection
80 * state machine. Generate the event, passing the PDU as data.
81 * If the current connection state allows reception of the event
82 * the PDU will be submitted to the IDM client for processing,
83 * otherwise the PDU will be dropped.
85 switch (IDM_PDU_OPCODE(pdu
)) {
86 case ISCSI_OP_LOGIN_CMD
:
87 DTRACE_ISCSI_2(login__command
, idm_conn_t
*, ic
,
88 iscsi_login_hdr_t
*, (iscsi_login_hdr_t
*)pdu
->isp_hdr
);
89 idm_conn_rx_pdu_event(ic
, CE_LOGIN_RCV
, (uintptr_t)pdu
);
91 case ISCSI_OP_LOGIN_RSP
:
92 idm_parse_login_rsp(ic
, pdu
, /* RX */ B_TRUE
);
94 case ISCSI_OP_LOGOUT_CMD
:
95 DTRACE_ISCSI_2(logout__command
, idm_conn_t
*, ic
,
97 (iscsi_logout_hdr_t
*)pdu
->isp_hdr
);
98 idm_parse_logout_req(ic
, pdu
, /* RX */ B_TRUE
);
100 case ISCSI_OP_LOGOUT_RSP
:
101 idm_parse_logout_rsp(ic
, pdu
, /* RX */ B_TRUE
);
103 case ISCSI_OP_ASYNC_EVENT
:
104 async_evt
= (iscsi_async_evt_hdr_t
*)pdu
->isp_hdr
;
105 switch (async_evt
->async_event
) {
106 case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT
:
107 idm_conn_rx_pdu_event(ic
, CE_ASYNC_LOGOUT_RCV
,
110 case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION
:
111 idm_conn_rx_pdu_event(ic
, CE_ASYNC_DROP_CONN_RCV
,
114 case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS
:
115 idm_conn_rx_pdu_event(ic
, CE_ASYNC_DROP_ALL_CONN_RCV
,
118 case ISCSI_ASYNC_EVENT_SCSI_EVENT
:
119 case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION
:
121 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
,
126 case ISCSI_OP_SCSI_CMD
:
128 * Consider this scenario: We are a target connection
129 * in "in login" state and a "login success sent" event has
130 * been generated but not yet handled. Since we've sent
131 * the login response but we haven't actually transitioned
132 * to FFP mode we might conceivably receive a SCSI command
133 * from the initiator before we are ready. We are actually
134 * in FFP we just don't know it yet -- to address this we
135 * can generate an event corresponding to the SCSI command.
136 * At the point when the event is handled by the state
137 * machine the login request will have been handled and we
138 * should be in FFP. If we are not in FFP by that time
139 * we can reject the SCSI command with a protocol error.
141 * This scenario only applies to the target.
143 * Handle dtrace probe in iscsit so we can find all the
146 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
, (uintptr_t)pdu
);
148 case ISCSI_OP_SCSI_DATA
:
149 DTRACE_ISCSI_2(data__receive
, idm_conn_t
*, ic
,
151 (iscsi_data_hdr_t
*)pdu
->isp_hdr
);
152 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
, (uintptr_t)pdu
);
154 case ISCSI_OP_SCSI_TASK_MGT_MSG
:
155 DTRACE_ISCSI_2(task__command
, idm_conn_t
*, ic
,
156 iscsi_scsi_task_mgt_hdr_t
*,
157 (iscsi_scsi_task_mgt_hdr_t
*)pdu
->isp_hdr
);
158 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
, (uintptr_t)pdu
);
160 case ISCSI_OP_NOOP_OUT
:
161 DTRACE_ISCSI_2(nop__receive
, idm_conn_t
*, ic
,
162 iscsi_nop_out_hdr_t
*,
163 (iscsi_nop_out_hdr_t
*)pdu
->isp_hdr
);
164 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
, (uintptr_t)pdu
);
166 case ISCSI_OP_TEXT_CMD
:
167 DTRACE_ISCSI_2(text__command
, idm_conn_t
*, ic
,
169 (iscsi_text_hdr_t
*)pdu
->isp_hdr
);
170 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
, (uintptr_t)pdu
);
172 /* Initiator PDU's */
173 case ISCSI_OP_SCSI_DATA_RSP
:
174 case ISCSI_OP_RTT_RSP
:
175 case ISCSI_OP_SNACK_CMD
:
176 case ISCSI_OP_NOOP_IN
:
177 case ISCSI_OP_TEXT_RSP
:
178 case ISCSI_OP_REJECT_MSG
:
179 case ISCSI_OP_SCSI_TASK_MGT_RSP
:
180 /* Validate received PDU against current state */
181 idm_conn_rx_pdu_event(ic
, CE_MISC_RX
,
185 mutex_exit(&ic
->ic_state_mutex
);
189 idm_pdu_tx_forward(idm_conn_t
*ic
, idm_pdu_t
*pdu
)
191 (*ic
->ic_transport_ops
->it_tx_pdu
)(ic
, pdu
);
195 idm_pdu_rx_forward_ffp(idm_conn_t
*ic
, idm_pdu_t
*pdu
)
198 * If this is an FFP request, call the appropriate handler
199 * and return B_TRUE, otherwise return B_FALSE.
201 switch (IDM_PDU_OPCODE(pdu
)) {
202 case ISCSI_OP_SCSI_CMD
:
203 (*ic
->ic_conn_ops
.icb_rx_scsi_cmd
)(ic
, pdu
);
205 case ISCSI_OP_SCSI_DATA
:
206 DTRACE_ISCSI_2(data__receive
, idm_conn_t
*, ic
,
208 (iscsi_data_hdr_t
*)pdu
->isp_hdr
);
209 (*ic
->ic_transport_ops
->it_rx_dataout
)(ic
, pdu
);
211 case ISCSI_OP_SCSI_TASK_MGT_MSG
:
212 DTRACE_ISCSI_2(task__command
, idm_conn_t
*, ic
,
213 iscsi_scsi_task_mgt_hdr_t
*,
214 (iscsi_scsi_task_mgt_hdr_t
*)pdu
->isp_hdr
);
215 (*ic
->ic_conn_ops
.icb_rx_misc
)(ic
, pdu
);
217 case ISCSI_OP_NOOP_OUT
:
218 DTRACE_ISCSI_2(nop__receive
, idm_conn_t
*, ic
,
219 iscsi_nop_out_hdr_t
*,
220 (iscsi_nop_out_hdr_t
*)pdu
->isp_hdr
);
221 (*ic
->ic_conn_ops
.icb_rx_misc
)(ic
, pdu
);
223 case ISCSI_OP_TEXT_CMD
:
224 DTRACE_ISCSI_2(text__command
, idm_conn_t
*, ic
,
226 (iscsi_text_hdr_t
*)pdu
->isp_hdr
);
227 (*ic
->ic_conn_ops
.icb_rx_misc
)(ic
, pdu
);
230 case ISCSI_OP_SCSI_RSP
:
231 (*ic
->ic_conn_ops
.icb_rx_scsi_rsp
)(ic
, pdu
);
233 case ISCSI_OP_SCSI_DATA_RSP
:
234 (*ic
->ic_transport_ops
->it_rx_datain
)(ic
, pdu
);
236 case ISCSI_OP_RTT_RSP
:
237 (*ic
->ic_transport_ops
->it_rx_rtt
)(ic
, pdu
);
239 case ISCSI_OP_SCSI_TASK_MGT_RSP
:
240 case ISCSI_OP_TEXT_RSP
:
241 case ISCSI_OP_NOOP_IN
:
242 (*ic
->ic_conn_ops
.icb_rx_misc
)(ic
, pdu
);
251 idm_pdu_rx_forward(idm_conn_t
*ic
, idm_pdu_t
*pdu
)
254 * Some PDU's specific to FFP get special handling. This function
255 * will normally never be called in FFP with an FFP PDU since this
256 * is a slow path but in can happen on the target side during
257 * the transition to FFP. We primarily call
258 * idm_pdu_rx_forward_ffp here to avoid code duplication.
260 if (idm_pdu_rx_forward_ffp(ic
, pdu
) == B_FALSE
) {
262 * Non-FFP PDU, use generic RC handler
264 (*ic
->ic_conn_ops
.icb_rx_misc
)(ic
, pdu
);
269 idm_parse_login_rsp(idm_conn_t
*ic
, idm_pdu_t
*login_rsp_pdu
, boolean_t rx
)
271 iscsi_login_rsp_hdr_t
*login_rsp
=
272 (iscsi_login_rsp_hdr_t
*)login_rsp_pdu
->isp_hdr
;
273 idm_conn_event_t new_event
;
275 if (login_rsp
->status_class
== ISCSI_STATUS_CLASS_SUCCESS
) {
276 if (!(login_rsp
->flags
& ISCSI_FLAG_LOGIN_CONTINUE
) &&
277 (login_rsp
->flags
& ISCSI_FLAG_LOGIN_TRANSIT
) &&
278 (ISCSI_LOGIN_NEXT_STAGE(login_rsp
->flags
) ==
279 ISCSI_FULL_FEATURE_PHASE
)) {
280 new_event
= (rx
? CE_LOGIN_SUCCESS_RCV
:
281 CE_LOGIN_SUCCESS_SND
);
283 new_event
= (rx
? CE_MISC_RX
: CE_MISC_TX
);
286 new_event
= (rx
? CE_LOGIN_FAIL_RCV
: CE_LOGIN_FAIL_SND
);
290 idm_conn_rx_pdu_event(ic
, new_event
, (uintptr_t)login_rsp_pdu
);
292 idm_conn_tx_pdu_event(ic
, new_event
, (uintptr_t)login_rsp_pdu
);
298 idm_parse_logout_req(idm_conn_t
*ic
, idm_pdu_t
*logout_req_pdu
, boolean_t rx
)
300 iscsi_logout_hdr_t
*logout_req
=
301 (iscsi_logout_hdr_t
*)logout_req_pdu
->isp_hdr
;
302 idm_conn_event_t new_event
;
304 (logout_req
->flags
& ISCSI_FLAG_LOGOUT_REASON_MASK
);
307 * For a normal logout (close connection or close session) IDM
308 * will terminate processing of all tasks completing the tasks
309 * back to the client with a status indicating the connection
310 * was logged out. These tasks do not get completed.
312 * For a "close connection for recovery logout) IDM suspends
313 * processing of all tasks and completes them back to the client
314 * with a status indicating connection was logged out for
315 * recovery. Both initiator and target hang onto these tasks.
316 * When we add ERL2 support IDM will need to provide mechanisms
317 * to change the task and buffer associations to a new connection.
319 * This code doesn't address the possibility of MC/S. We'll
320 * need to decide how the separate connections get handled
321 * in that case. One simple option is to make the client
322 * generate the events for the other connections.
324 if (reason
== ISCSI_LOGOUT_REASON_CLOSE_SESSION
) {
326 (rx
? CE_LOGOUT_SESSION_RCV
: CE_LOGOUT_SESSION_SND
);
327 } else if ((reason
== ISCSI_LOGOUT_REASON_CLOSE_CONNECTION
) ||
328 (reason
== ISCSI_LOGOUT_REASON_RECOVERY
)) {
329 /* Check logout CID against this connection's CID */
330 if (ntohs(logout_req
->cid
) == ic
->ic_login_cid
) {
331 /* Logout is for this connection */
332 new_event
= (rx
? CE_LOGOUT_THIS_CONN_RCV
:
333 CE_LOGOUT_THIS_CONN_SND
);
336 * Logout affects another connection. This is not
337 * a relevant event for this connection so we'll
338 * just treat it as a normal PDU event. Client
339 * will need to lookup the other connection and
340 * generate the event.
342 new_event
= (rx
? CE_MISC_RX
: CE_MISC_TX
);
345 /* Invalid reason code */
346 new_event
= (rx
? CE_RX_PROTOCOL_ERROR
: CE_TX_PROTOCOL_ERROR
);
350 idm_conn_rx_pdu_event(ic
, new_event
, (uintptr_t)logout_req_pdu
);
352 idm_conn_tx_pdu_event(ic
, new_event
, (uintptr_t)logout_req_pdu
);
359 idm_parse_logout_rsp(idm_conn_t
*ic
, idm_pdu_t
*logout_rsp_pdu
, boolean_t rx
)
361 idm_conn_event_t new_event
;
362 iscsi_logout_rsp_hdr_t
*logout_rsp
=
363 (iscsi_logout_rsp_hdr_t
*)logout_rsp_pdu
->isp_hdr
;
365 if (logout_rsp
->response
== ISCSI_STATUS_CLASS_SUCCESS
) {
366 new_event
= rx
? CE_LOGOUT_SUCCESS_RCV
: CE_LOGOUT_SUCCESS_SND
;
368 new_event
= rx
? CE_LOGOUT_FAIL_RCV
: CE_LOGOUT_FAIL_SND
;
372 idm_conn_rx_pdu_event(ic
, new_event
, (uintptr_t)logout_rsp_pdu
);
374 idm_conn_tx_pdu_event(ic
, new_event
, (uintptr_t)logout_rsp_pdu
);
379 * idm_svc_conn_create()
380 * Transport-agnostic service connection creation, invoked from the transport
384 idm_svc_conn_create(idm_svc_t
*is
, idm_transport_type_t tt
,
385 idm_conn_t
**ic_result
)
391 * Skip some work if we can already tell we are going offline.
392 * Otherwise we will destroy this connection later as part of
393 * shutting down the svc.
395 mutex_enter(&is
->is_mutex
);
396 if (!is
->is_online
) {
397 mutex_exit(&is
->is_mutex
);
398 return (IDM_STATUS_FAIL
);
400 mutex_exit(&is
->is_mutex
);
402 ic
= idm_conn_create_common(CONN_TYPE_TGT
, tt
,
403 &is
->is_svc_req
.sr_conn_ops
);
405 return (IDM_STATUS_FAIL
);
407 ic
->ic_svc_binding
= is
;
410 * Prepare connection state machine
412 if ((rc
= idm_conn_sm_init(ic
)) != 0) {
413 idm_conn_destroy_common(ic
);
420 mutex_enter(&idm
.idm_global_mutex
);
421 list_insert_tail(&idm
.idm_tgt_conn_list
, ic
);
422 idm
.idm_tgt_conn_count
++;
423 mutex_exit(&idm
.idm_global_mutex
);
425 return (IDM_STATUS_SUCCESS
);
429 idm_svc_conn_destroy(idm_conn_t
*ic
)
431 mutex_enter(&idm
.idm_global_mutex
);
432 list_remove(&idm
.idm_tgt_conn_list
, ic
);
433 idm
.idm_tgt_conn_count
--;
434 mutex_exit(&idm
.idm_global_mutex
);
436 if (ic
->ic_transport_private
!= NULL
) {
437 ic
->ic_transport_ops
->it_tgt_conn_destroy(ic
);
439 idm_conn_destroy_common(ic
);
443 * idm_conn_create_common()
445 * Allocate and initialize IDM connection context
448 idm_conn_create_common(idm_conn_type_t conn_type
, idm_transport_type_t tt
,
449 idm_conn_ops_t
*conn_ops
)
453 idm_transport_type_t type
;
455 for (type
= 0; type
< IDM_TRANSPORT_NUM_TYPES
; type
++) {
456 it
= &idm_transport_list
[type
];
458 if ((it
->it_ops
!= NULL
) && (it
->it_type
== tt
))
461 ASSERT(it
->it_type
== tt
);
462 if (it
->it_type
!= tt
)
465 ic
= kmem_zalloc(sizeof (idm_conn_t
), KM_SLEEP
);
467 /* Initialize data */
468 ic
->ic_target_name
[0] = '\0';
469 ic
->ic_initiator_name
[0] = '\0';
470 ic
->ic_isid
[0] = '\0';
471 ic
->ic_tsih
[0] = '\0';
472 ic
->ic_conn_type
= conn_type
;
473 ic
->ic_conn_ops
= *conn_ops
;
474 ic
->ic_transport_ops
= it
->it_ops
;
475 ic
->ic_transport_type
= tt
;
476 ic
->ic_transport_private
= NULL
; /* Set by transport service */
477 ic
->ic_internal_cid
= idm_cid_alloc();
478 if (ic
->ic_internal_cid
== 0) {
479 kmem_free(ic
, sizeof (idm_conn_t
));
482 mutex_init(&ic
->ic_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
483 cv_init(&ic
->ic_cv
, NULL
, CV_DEFAULT
, NULL
);
484 idm_refcnt_init(&ic
->ic_refcnt
, ic
);
490 idm_conn_destroy_common(idm_conn_t
*ic
)
492 idm_conn_sm_fini(ic
);
493 idm_refcnt_destroy(&ic
->ic_refcnt
);
494 cv_destroy(&ic
->ic_cv
);
495 mutex_destroy(&ic
->ic_mutex
);
496 idm_cid_free(ic
->ic_internal_cid
);
498 kmem_free(ic
, sizeof (idm_conn_t
));
502 * Invoked from the SM as a result of client's invocation of
503 * idm_ini_conn_connect()
506 idm_ini_conn_finish(idm_conn_t
*ic
)
508 /* invoke transport-specific connection */
509 return (ic
->ic_transport_ops
->it_ini_conn_connect(ic
));
513 idm_tgt_conn_finish(idm_conn_t
*ic
)
517 rc
= idm_notify_client(ic
, CN_CONNECT_ACCEPT
, (uintptr_t)NULL
);
518 if (rc
!= IDM_STATUS_SUCCESS
) {
519 return (IDM_STATUS_REJECT
);
522 /* Target client is ready to receive a login, start connection */
523 return (ic
->ic_transport_ops
->it_tgt_conn_connect(ic
));
527 idm_transport_lookup(idm_conn_req_t
*cr
)
529 idm_transport_type_t type
;
531 idm_transport_caps_t caps
;
534 * Make sure all available transports are setup. We call this now
535 * instead of at initialization time in case IB has become available
536 * since we started (hotplug, etc).
538 idm_transport_setup(cr
->cr_li
, cr
->cr_boot_conn
);
540 /* Determine the transport for this connection */
541 for (type
= 0; type
< IDM_TRANSPORT_NUM_TYPES
; type
++) {
542 it
= &idm_transport_list
[type
];
544 if (it
->it_ops
== NULL
) {
545 /* transport is not registered */
549 if (it
->it_ops
->it_conn_is_capable(cr
, &caps
)) {
555 return (NULL
); /* Make gcc happy */
559 idm_transport_setup(ldi_ident_t li
, boolean_t boot_conn
)
561 idm_transport_type_t type
;
565 for (type
= 0; type
< IDM_TRANSPORT_NUM_TYPES
; type
++) {
566 it
= &idm_transport_list
[type
];
568 * We may want to store the LDI handle in the idm_svc_t
569 * and then allow multiple calls to ldi_open_by_name. This
570 * would enable the LDI code to track who has the device open
571 * which could be useful in the case where we have multiple
572 * services and perhaps also have initiator and target opening
573 * the transport simultaneously. For now we stick with the
576 if (it
->it_ops
== NULL
) {
577 /* transport is not ready, try to initialize it */
578 if (it
->it_type
== IDM_TRANSPORT_TYPE_SOCKETS
) {
581 if (boot_conn
== B_TRUE
) {
583 * iSCSI boot doesn't need iSER.
584 * Open iSER here may drive IO to
585 * a failed session and cause
590 rc
= ldi_open_by_name(it
->it_device_path
,
591 FREAD
| FWRITE
, kcred
, &it
->it_ldi_hdl
, li
);
593 * If the open is successful we will have
594 * filled in the LDI handle in the transport
595 * table and we expect that the transport
599 it
->it_ldi_hdl
= NULL
;
607 idm_transport_teardown()
609 idm_transport_type_t type
;
612 ASSERT(mutex_owned(&idm
.idm_global_mutex
));
614 /* Caller holds the IDM global mutex */
615 for (type
= 0; type
< IDM_TRANSPORT_NUM_TYPES
; type
++) {
616 it
= &idm_transport_list
[type
];
617 /* If we have an open LDI handle on this driver, close it */
618 if (it
->it_ldi_hdl
!= NULL
) {
619 (void) ldi_close(it
->it_ldi_hdl
, FNDELAY
, kcred
);
620 it
->it_ldi_hdl
= NULL
;
626 * ID pool code. We use this to generate unique structure identifiers without
627 * searching the existing structures. This avoids the need to lock entire
628 * sets of structures at inopportune times. Adapted from the CIFS server code.
630 * A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
631 * A bit set to '1' indicates that that particular value has been allocated.
632 * The allocation process is done shifting a bit through the whole bitmap.
633 * The current position of that index bit is kept in the idm_idpool_t
634 * structure and represented by a byte index (0 to buffer size minus 1) and
635 * a bit index (0 to 7).
637 * The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
638 * out of IDs its current size is doubled until it reaches its maximum size
639 * (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
640 * means that a pool can have a maximum number of 65534 IDs available.
644 idm_idpool_increment(
650 ASSERT(pool
->id_magic
== IDM_IDPOOL_MAGIC
);
652 new_size
= pool
->id_size
* 2;
653 if (new_size
<= IDM_IDPOOL_MAX_SIZE
) {
654 new_pool
= kmem_alloc(new_size
/ 8, KM_NOSLEEP
);
656 bzero(new_pool
, new_size
/ 8);
657 bcopy(pool
->id_pool
, new_pool
, pool
->id_size
/ 8);
658 kmem_free(pool
->id_pool
, pool
->id_size
/ 8);
659 pool
->id_pool
= new_pool
;
660 pool
->id_free_counter
+= new_size
- pool
->id_size
;
661 pool
->id_max_free_counter
+= new_size
- pool
->id_size
;
662 pool
->id_size
= new_size
;
663 pool
->id_idx_msk
= (new_size
/ 8) - 1;
664 if (new_size
>= IDM_IDPOOL_MAX_SIZE
) {
665 /* id -1 made unavailable */
666 pool
->id_pool
[pool
->id_idx_msk
] = 0x80;
667 pool
->id_free_counter
--;
668 pool
->id_max_free_counter
--;
677 * idm_idpool_constructor
679 * This function initializes the pool structure provided.
683 idm_idpool_create(idm_idpool_t
*pool
)
686 ASSERT(pool
->id_magic
!= IDM_IDPOOL_MAGIC
);
688 pool
->id_size
= IDM_IDPOOL_MIN_SIZE
;
689 pool
->id_idx_msk
= (IDM_IDPOOL_MIN_SIZE
/ 8) - 1;
690 pool
->id_free_counter
= IDM_IDPOOL_MIN_SIZE
- 1;
691 pool
->id_max_free_counter
= IDM_IDPOOL_MIN_SIZE
- 1;
693 pool
->id_bit_idx
= 1;
695 pool
->id_pool
= kmem_alloc((IDM_IDPOOL_MIN_SIZE
/ 8),
697 bzero(pool
->id_pool
, (IDM_IDPOOL_MIN_SIZE
/ 8));
698 /* -1 id made unavailable */
699 pool
->id_pool
[0] = 0x01; /* id 0 made unavailable */
700 mutex_init(&pool
->id_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
701 pool
->id_magic
= IDM_IDPOOL_MAGIC
;
706 * idm_idpool_destructor
708 * This function tears down and frees the resources associated with the
713 idm_idpool_destroy(idm_idpool_t
*pool
)
715 ASSERT(pool
->id_magic
== IDM_IDPOOL_MAGIC
);
716 ASSERT(pool
->id_free_counter
== pool
->id_max_free_counter
);
717 pool
->id_magic
= (uint32_t)~IDM_IDPOOL_MAGIC
;
718 mutex_destroy(&pool
->id_mutex
);
719 kmem_free(pool
->id_pool
, (size_t)(pool
->id_size
/ 8));
725 * This function allocates an ID from the pool provided.
728 idm_idpool_alloc(idm_idpool_t
*pool
, uint16_t *id
)
735 ASSERT(pool
->id_magic
== IDM_IDPOOL_MAGIC
);
737 mutex_enter(&pool
->id_mutex
);
738 if ((pool
->id_free_counter
== 0) && idm_idpool_increment(pool
)) {
739 mutex_exit(&pool
->id_mutex
);
746 bit_idx
= pool
->id_bit_idx
;
747 byte
= pool
->id_pool
[pool
->id_idx
];
754 pool
->id_pool
[pool
->id_idx
] |= bit
;
755 *id
= (uint16_t)(pool
->id_idx
* 8 + (uint32_t)bit_idx
);
756 pool
->id_free_counter
--;
758 pool
->id_bit_idx
= bit_idx
;
759 mutex_exit(&pool
->id_mutex
);
763 pool
->id_bit_idx
= 0;
765 pool
->id_idx
&= pool
->id_idx_msk
;
769 * This section of code shouldn't be reached. If there are IDs
770 * available and none could be found there's a problem.
773 mutex_exit(&pool
->id_mutex
);
780 * This function frees the ID provided.
783 idm_idpool_free(idm_idpool_t
*pool
, uint16_t id
)
785 ASSERT(pool
->id_magic
== IDM_IDPOOL_MAGIC
);
787 ASSERT(id
!= 0xFFFF);
789 mutex_enter(&pool
->id_mutex
);
790 if (pool
->id_pool
[id
>> 3] & (1 << (id
& 7))) {
791 pool
->id_pool
[id
>> 3] &= ~(1 << (id
& 7));
792 pool
->id_free_counter
++;
793 ASSERT(pool
->id_free_counter
<= pool
->id_max_free_counter
);
794 mutex_exit(&pool
->id_mutex
);
797 /* Freeing a free ID. */
799 mutex_exit(&pool
->id_mutex
);
806 * ID pool works with 16-bit identifiers right now. That should
807 * be plenty since we will probably never have more than 2^16
808 * connections simultaneously.
812 if (idm_idpool_alloc(&idm
.idm_conn_id_pool
, &cid16
) == -1) {
813 return (0); /* Fail */
816 return ((uint32_t)cid16
);
820 idm_cid_free(uint32_t cid
)
822 idm_idpool_free(&idm
.idm_conn_id_pool
, (uint16_t)cid
);
827 * Code for generating the header and data digests
829 * This is the CRC-32C table
833 * reflect input bytes = true
834 * reflect output bytes = true
/*
 * Code for generating the header and data digests
 *
 * This is the CRC-32C table
 * Generated with:
 * width = 32 bits
 * poly = 0x1EDC6F41
 * reflect input bytes = true
 * reflect output bytes = true
 */
uint32_t idm_crc32c_table[256] =
{
	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
};
906 * iscsi_crc32c - Steps through buffer one byte at at time, calculates
907 * reflected crc using table.
910 idm_crc32c(void *address
, unsigned long length
)
912 uint8_t *buffer
= address
;
913 uint32_t crc
= 0xffffffff, result
;
915 uint8_t byte0
, byte1
, byte2
, byte3
;
918 ASSERT(address
!= NULL
);
920 if (iscsi_crc32_hd
== -1) {
921 if (hd_crc32_avail((uint32_t *)idm_crc32c_table
) == B_TRUE
) {
927 if (iscsi_crc32_hd
== 0)
928 return (HW_CRC32(buffer
, length
, crc
));
931 crc
= idm_crc32c_table
[(crc
^ *buffer
++) & 0xFFL
] ^
934 result
= crc
^ 0xffffffff;
937 byte0
= (uint8_t)(result
& 0xFF);
938 byte1
= (uint8_t)((result
>> 8) & 0xFF);
939 byte2
= (uint8_t)((result
>> 16) & 0xFF);
940 byte3
= (uint8_t)((result
>> 24) & 0xFF);
941 result
= ((byte0
<< 24) | (byte1
<< 16) | (byte2
<< 8) | byte3
);
942 #endif /* _BIG_ENDIAN */
949 * idm_crc32c_continued - Continues stepping through buffer one
950 * byte at at time, calculates reflected crc using table.
953 idm_crc32c_continued(void *address
, unsigned long length
, uint32_t crc
)
955 uint8_t *buffer
= address
;
958 uint8_t byte0
, byte1
, byte2
, byte3
;
961 ASSERT(address
!= NULL
);
963 if (iscsi_crc32_hd
== -1) {
964 if (hd_crc32_avail((uint32_t *)idm_crc32c_table
) == B_TRUE
) {
970 if (iscsi_crc32_hd
== 0)
971 return (HW_CRC32_CONT(buffer
, length
, crc
));
975 byte0
= (uint8_t)((crc
>> 24) & 0xFF);
976 byte1
= (uint8_t)((crc
>> 16) & 0xFF);
977 byte2
= (uint8_t)((crc
>> 8) & 0xFF);
978 byte3
= (uint8_t)(crc
& 0xFF);
979 crc
= ((byte3
<< 24) | (byte2
<< 16) | (byte1
<< 8) | byte0
);
982 crc
= crc
^ 0xffffffff;
984 crc
= idm_crc32c_table
[(crc
^ *buffer
++) & 0xFFL
] ^
987 result
= crc
^ 0xffffffff;
990 byte0
= (uint8_t)(result
& 0xFF);
991 byte1
= (uint8_t)((result
>> 8) & 0xFF);
992 byte2
= (uint8_t)((result
>> 16) & 0xFF);
993 byte3
= (uint8_t)((result
>> 24) & 0xFF);
994 result
= ((byte0
<< 24) | (byte1
<< 16) | (byte2
<< 8) | byte3
);
1001 idm_task_constructor(void *hdl
, void *arg
, int flags
)
1003 idm_task_t
*idt
= (idm_task_t
*)hdl
;
1006 mutex_init(&idt
->idt_mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
1008 /* Find the next free task ID */
1009 rw_enter(&idm
.idm_taskid_table_lock
, RW_WRITER
);
1010 next_task
= idm
.idm_taskid_next
;
1011 while (idm
.idm_taskid_table
[next_task
]) {
1013 if (next_task
== idm
.idm_taskid_max
)
1015 if (next_task
== idm
.idm_taskid_next
) {
1016 rw_exit(&idm
.idm_taskid_table_lock
);
1021 idm
.idm_taskid_table
[next_task
] = idt
;
1022 idm
.idm_taskid_next
= (next_task
+ 1) % idm
.idm_taskid_max
;
1023 rw_exit(&idm
.idm_taskid_table_lock
);
1025 idt
->idt_tt
= next_task
;
1027 list_create(&idt
->idt_inbufv
, sizeof (idm_buf_t
),
1028 offsetof(idm_buf_t
, idb_buflink
));
1029 list_create(&idt
->idt_outbufv
, sizeof (idm_buf_t
),
1030 offsetof(idm_buf_t
, idb_buflink
));
1031 idm_refcnt_init(&idt
->idt_refcnt
, idt
);
1034 * Set the transport header pointer explicitly. This removes the
1035 * need for per-transport header allocation, which simplifies cache
1036 * init considerably. If at a later date we have an additional IDM
1037 * transport that requires a different size, we'll revisit this.
1039 idt
->idt_transport_hdr
= (void *)(idt
+ 1); /* pointer arithmetic */
1046 idm_task_destructor(void *hdl
, void *arg
)
1048 idm_task_t
*idt
= (idm_task_t
*)hdl
;
1050 /* Remove the task from the ID table */
1051 rw_enter(&idm
.idm_taskid_table_lock
, RW_WRITER
);
1052 idm
.idm_taskid_table
[idt
->idt_tt
] = NULL
;
1053 rw_exit(&idm
.idm_taskid_table_lock
);
1055 /* free the inbuf and outbuf */
1056 idm_refcnt_destroy(&idt
->idt_refcnt
);
1057 list_destroy(&idt
->idt_inbufv
);
1058 list_destroy(&idt
->idt_outbufv
);
1061 * The final call to idm_task_rele may happen with the task
1062 * mutex held which may invoke this destructor immediately.
1063 * Stall here until the task mutex owner lets go.
1065 mutex_enter(&idt
->idt_mutex
);
1066 mutex_destroy(&idt
->idt_mutex
);
1070 * idm_listbuf_insert searches from the back of the list looking for the
1074 idm_listbuf_insert(list_t
*lst
, idm_buf_t
*buf
)
1078 /* iterate through the list to find the insertion point */
1079 for (idb
= list_tail(lst
); idb
!= NULL
; idb
= list_prev(lst
, idb
)) {
1081 if (idb
->idb_bufoffset
< buf
->idb_bufoffset
) {
1083 list_insert_after(lst
, idb
, buf
);
1088 /* add the buf to the head of the list */
1089 list_insert_head(lst
, buf
);
1095 idm_wd_thread(void *arg
)
1098 clock_t wake_time
= SEC_TO_TICK(IDM_WD_INTERVAL
);
1101 /* Record the thread id for thread_join() */
1102 idm
.idm_wd_thread_did
= curthread
->t_did
;
1103 mutex_enter(&idm
.idm_global_mutex
);
1104 idm
.idm_wd_thread_running
= B_TRUE
;
1105 cv_signal(&idm
.idm_wd_cv
);
1107 while (idm
.idm_wd_thread_running
) {
1108 for (ic
= list_head(&idm
.idm_tgt_conn_list
);
1110 ic
= list_next(&idm
.idm_tgt_conn_list
, ic
)) {
1111 idle_time
= ddi_get_lbolt() - ic
->ic_timestamp
;
1114 * If this connection is in FFP then grab a hold
1115 * and check the various timeout thresholds. Otherwise
1116 * the connection is closing and we should just
1117 * move on to the next one.
1119 mutex_enter(&ic
->ic_state_mutex
);
1123 mutex_exit(&ic
->ic_state_mutex
);
1128 * If there hasn't been any activity on this
1129 * connection for the keepalive timeout period
1130 * and if the client has provided a keepalive
1131 * callback then call the keepalive callback.
1132 * This allows the client to take action to keep
1133 * the link alive (like send a nop PDU).
1135 if ((TICK_TO_SEC(idle_time
) >=
1136 IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT
) &&
1137 !ic
->ic_keepalive
) {
1138 ic
->ic_keepalive
= B_TRUE
;
1139 if (ic
->ic_conn_ops
.icb_keepalive
) {
1140 mutex_exit(&ic
->ic_state_mutex
);
1141 mutex_exit(&idm
.idm_global_mutex
);
1142 (*ic
->ic_conn_ops
.icb_keepalive
)(ic
);
1143 mutex_enter(&idm
.idm_global_mutex
);
1144 mutex_enter(&ic
->ic_state_mutex
);
1146 } else if ((TICK_TO_SEC(idle_time
) <
1147 IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT
)) {
1148 /* Reset keepalive */
1149 ic
->ic_keepalive
= B_FALSE
;
1153 * If there hasn't been any activity on this
1154 * connection for the failure timeout period then
1155 * drop the connection. We expect the initiator
1156 * to keep the connection alive if it wants the
1157 * connection to stay open.
1159 * If it turns out to be desireable to take a
1160 * more active role in maintaining the connect
1161 * we could add a client callback to send
1162 * a "keepalive" kind of message (no doubt a nop)
1163 * and fire that on a shorter timer.
1165 if (TICK_TO_SEC(idle_time
) >
1166 IDM_TRANSPORT_FAIL_IDLE_TIMEOUT
) {
1167 mutex_exit(&ic
->ic_state_mutex
);
1168 mutex_exit(&idm
.idm_global_mutex
);
1169 IDM_SM_LOG(CE_WARN
, "idm_wd_thread: "
1170 "conn %p idle for %d seconds, "
1171 "sending CE_TRANSPORT_FAIL",
1172 (void *)ic
, (int)idle_time
);
1173 idm_conn_event(ic
, CE_TRANSPORT_FAIL
,
1175 mutex_enter(&idm
.idm_global_mutex
);
1176 mutex_enter(&ic
->ic_state_mutex
);
1181 mutex_exit(&ic
->ic_state_mutex
);
1184 (void) cv_reltimedwait(&idm
.idm_wd_cv
, &idm
.idm_global_mutex
,
1185 wake_time
, TR_CLOCK_TICK
);
1187 mutex_exit(&idm
.idm_global_mutex
);