/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Infiniband Device Management Agent for IB storage.
 */
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sysmacros.h>

#include <sys/ib/ibtl/ibti.h>		/* IB public interfaces */

#include <sys/ib/mgt/ibdma/ibdma.h>
#include <sys/ib/mgt/ibdma/ibdma_impl.h>
/*
 * NOTE: The IB Device Management Agent function, like other IB
 * managers and agents, is best implemented as a kernel misc module.
 *
 * Eventually we could modify IBT_DM_AGENT so that we don't need to
 * open each HCA to receive asynchronous events.
 */
#define	IBDMA_NAME_VERSION	"IB Device Management Agent"

extern struct mod_ops mod_miscops;
static void ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
    ibt_async_code_t code, ibt_async_event_t *event);

static void ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl,
    ibmf_msg_t *msgp, void *args);
static void ibdma_create_resp_mad(ibmf_msg_t *msgp);
/*
 * Misc. kernel module for now.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,
	IBDMA_NAME_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static ibt_clnt_modinfo_t ibdma_ibt_modinfo = {
	IBTI_V_CURR,
	IBT_DM_AGENT,
	ibdma_ibt_async_handler,
	NULL,
	"ibdma"
};
/*
 * Module global state allocated at init().
 */
static ibdma_mod_state_t	*ibdma = NULL;
/*
 * Init/Fini handlers and IBTL HCA management prototypes.
 */
static int ibdma_init();
static int ibdma_fini();
static int ibdma_ibt_init();
static void ibdma_ibt_fini();
static ibdma_hca_t *ibdma_hca_init(ib_guid_t guid);
static void ibdma_hca_fini(ibdma_hca_t *hca);
static ibdma_hca_t *ibdma_find_hca(ib_guid_t guid);
/*
 * DevMgmt Agent MAD attribute handler prototypes.
 */
static void ibdma_get_class_portinfo(ibmf_msg_t *msg);
static void ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg);
static void ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg);
int
_init(void)
{
	int status;

	ASSERT(ibdma == NULL);

	ibdma = kmem_zalloc(sizeof (*ibdma), KM_SLEEP);
	ASSERT(ibdma != NULL);

	status = ibdma_init();
	if (status != DDI_SUCCESS) {
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
		return (status);
	}

	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_init, mod_install error (%d)", status);
		(void) ibdma_fini();
		kmem_free(ibdma, sizeof (*ibdma));
		ibdma = NULL;
	}
	return (status);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
int
_fini(void)
{
	ibdma_hca_t	*hca;
	int		slot;
	int		status;

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "_fini, mod_remove error (%d)", status);
		return (status);
	}

	/*
	 * Sanity check to see if anyone is not cleaning up properly.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
			if (hca->ih_ioc[slot].ii_inuse) {
				cmn_err(CE_NOTE, "_fini, IOC %d still attached"
				    " for (0x%0llx)", slot + 1,
				    (u_longlong_t)hca->ih_iou_guid);
			}
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	mutex_exit(&ibdma->ms_hca_list_lock);

	(void) ibdma_fini();
	kmem_free(ibdma, sizeof (*ibdma));
	ibdma = NULL;
	return (status);
}
/*
 * ibdma_init()
 *
 * Initialize the I/O Unit structure, generate the initial HCA list, and
 * register its ports with the IBMF.
 */
static int
ibdma_init()
{
	int	status;

	/*
	 * Global lock and I/O Unit initialization.
	 */
	mutex_init(&ibdma->ms_hca_list_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Discover IB hardware and setup for device management agent
	 * support.
	 */
	status = ibdma_ibt_init();
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibdma_init, ibt_attach failed (%d)",
		    status);
		mutex_destroy(&ibdma->ms_hca_list_lock);
		return (status);
	}

	return (DDI_SUCCESS);
}
/*
 * ibdma_fini()
 *
 * Release resources if we are no longer in use.
 */
static int
ibdma_fini()
{
	ibdma_ibt_fini();
	mutex_destroy(&ibdma->ms_hca_list_lock);
	return (DDI_SUCCESS);
}
/*
 * ibdma_ibt_async_handler()
 */
static void
ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
    ibt_async_code_t code, ibt_async_event_t *event)
{
	ibdma_hca_t	*hca;

	switch (code) {

	case IBT_EVENT_PORT_UP:
	case IBT_ERROR_PORT_DOWN:
	case IBT_PORT_CHANGE_EVENT:
	case IBT_CLNT_REREG_EVENT:
		break;

	case IBT_HCA_ATTACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_hca_init(event->ev_hca_guid);
		if (hca != NULL) {
			list_insert_tail(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "hca ibt hdl (%p)",
			    (void *)hca->ih_ibt_hdl);
			ibdma->ms_num_hcas++;
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	case IBT_HCA_DETACH_EVENT:
		mutex_enter(&ibdma->ms_hca_list_lock);
		hca = ibdma_find_hca(event->ev_hca_guid);
		if (hca != NULL) {
			list_remove(&ibdma->ms_hca_list, hca);
			cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
			    (void *)hca, hca ?
			    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
			ibdma_hca_fini(hca);
		}
		mutex_exit(&ibdma->ms_hca_list_lock);
		break;

	default:
		cmn_err(CE_NOTE, "ibt_async_handler, unhandled event(%d)",
		    code);
		break;
	}
}
/*
 * ibdma_ibt_init()
 */
static int
ibdma_ibt_init()
{
	int		status;
	int		hca_cnt;
	int		hca_ndx;
	ib_guid_t	*guid;
	ibdma_hca_t	*hca;

	/*
	 * Attach to IBTF and get HCA list.
	 */
	status = ibt_attach(&ibdma_ibt_modinfo, NULL,
	    ibdma, &ibdma->ms_ibt_hdl);
	if (status != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "ibt_init, ibt_attach failed (%d)",
		    status);
		return (status);
	}

	list_create(&ibdma->ms_hca_list, sizeof (ibdma_hca_t),
	    offsetof(ibdma_hca_t, ih_node));

	hca_cnt = ibt_get_hca_list(&guid);
	if (hca_cnt < 1) {
		cmn_err(CE_NOTE, "ibt_init, no HCA(s) found");
		/* not an error if no HCAs, but nothing more to do here */
		return (DDI_SUCCESS);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);

	for (hca_ndx = 0; hca_ndx < hca_cnt; hca_ndx++) {
		cmn_err(CE_NOTE, "adding hca GUID(0x%llx)",
		    (u_longlong_t)guid[hca_ndx]);

		hca = ibdma_hca_init(guid[hca_ndx]);
		if (hca == NULL) {
			cmn_err(CE_NOTE, "ibt_init, hca_init GUID(0x%llx)"
			    " failed", (u_longlong_t)guid[hca_ndx]);
			continue;
		}
		list_insert_tail(&ibdma->ms_hca_list, hca);
		ibdma->ms_num_hcas++;
	}

	mutex_exit(&ibdma->ms_hca_list_lock);

	ibt_free_hca_list(guid, hca_cnt);

	cmn_err(CE_NOTE, "Added %d HCA(s)",
	    ibdma->ms_num_hcas);

	return (DDI_SUCCESS);
}
/*
 * ibdma_ibt_fini()
 */
static void
ibdma_ibt_fini()
{
	ibdma_hca_t	*hca;
	ibdma_hca_t	*next;

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		next = list_next(&ibdma->ms_hca_list, hca);
		list_remove(&ibdma->ms_hca_list, hca);

		cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
		    (void *)hca, hca ?
		    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
		cmn_err(CE_NOTE, "hca ibt hdl (%p)",
		    (void *)hca->ih_ibt_hdl);

		ibdma_hca_fini(hca);
		hca = next;
	}
	list_destroy(&ibdma->ms_hca_list);

	(void) ibt_detach(ibdma->ms_ibt_hdl);
	ibdma->ms_ibt_hdl = NULL;
	ibdma->ms_num_hcas = 0;
	mutex_exit(&ibdma->ms_hca_list_lock);
}
/*
 * ibdma_find_hca()
 */
static ibdma_hca_t *
ibdma_find_hca(ib_guid_t guid)
{
	ibdma_hca_t	*hca;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	hca = list_head(&ibdma->ms_hca_list);
	while (hca != NULL) {
		if (hca->ih_iou_guid == guid) {
			break;
		}
		hca = list_next(&ibdma->ms_hca_list, hca);
	}
	return (hca);
}
/*
 * ibdma_hca_init()
 */
static ibdma_hca_t *
ibdma_hca_init(ib_guid_t guid)
{
	int		status;
	int		ndx;
	ibdma_hca_t	*hca;
	ibdma_port_t	*port;
	ibt_hca_attr_t	hca_attr;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	status = ibt_query_hca_byguid(guid, &hca_attr);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init HCA query error (%d)",
		    status);
		return (NULL);
	}

	if (ibdma_find_hca(guid) != NULL) {
		cmn_err(CE_NOTE, "hca_init HCA already exists");
		return (NULL);
	}

	hca = kmem_zalloc(sizeof (ibdma_hca_t) +
	    (hca_attr.hca_nports - 1) * sizeof (ibdma_port_t), KM_SLEEP);

	hca->ih_nports = hca_attr.hca_nports;

	rw_init(&hca->ih_iou_rwlock, NULL, RW_DRIVER, NULL);
	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	hca->ih_iou_guid		= guid;
	hca->ih_iou.iou_changeid	= h2b16(1);
	hca->ih_iou.iou_num_ctrl_slots	= IBDMA_MAX_IOC;
	hca->ih_iou.iou_flag		= IB_DM_IOU_OPTIONROM_ABSENT;

	list_create(&hca->ih_hdl_list, sizeof (ibdma_hdl_impl_t),
	    offsetof(ibdma_hdl_impl_t, ih_node));
	rw_exit(&hca->ih_iou_rwlock);

	/*
	 * It would be better to not open, but IBTL is setup to only allow
	 * certain managers to get async call backs if not open.
	 */
	status = ibt_open_hca(ibdma->ms_ibt_hdl, guid, &hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS) {
		cmn_err(CE_NOTE, "hca_init() IBT open failed (%d)",
		    status);

		list_destroy(&hca->ih_hdl_list);
		rw_destroy(&hca->ih_iou_rwlock);
		kmem_free(hca, sizeof (ibdma_hca_t) +
		    (hca_attr.hca_nports - 1) * sizeof (ibdma_port_t));
		return (NULL);
	}
	/*
	 * Register with the IB Management Framework and setup MAD call-back.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = hca;
		port->ip_ibmf_reg.ir_ci_guid = hca->ih_iou_guid;
		port->ip_ibmf_reg.ir_port_num = ndx + 1;
		port->ip_ibmf_reg.ir_client_class = DEV_MGT_AGENT;

		status = ibmf_register(&port->ip_ibmf_reg, IBMF_VERSION,
		    0, NULL, NULL, &port->ip_ibmf_hdl, &port->ip_ibmf_caps);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF register failed (%d)",
			    status);
			port->ip_ibmf_hdl = NULL;
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibmf_setup_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, ibdma_mad_recv_cb, port, 0);
		if (status != IBMF_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBMF cb setup failed (%d)",
			    status);
			ibdma_hca_fini(hca);
			return (NULL);
		}

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx + 1, IBT_PORT_SET_DEVMGT, 0);
		if (status != IBT_SUCCESS) {
			cmn_err(CE_NOTE, "hca_init, IBT modify port caps"
			    " error (%d)", status);
			ibdma_hca_fini(hca);
			return (NULL);
		}
	}
	return (hca);
}
/*
 * ibdma_hca_fini()
 */
static void
ibdma_hca_fini(ibdma_hca_t *hca)
{
	int			status;
	int			ndx;
	ibdma_port_t		*port;
	ibdma_hdl_impl_t	*hdl;
	ibdma_hdl_impl_t	*hdl_next;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);

	/*
	 * All handles should have been de-registered, but release
	 * any that are outstanding.
	 */
	hdl = list_head(&hca->ih_hdl_list);
	while (hdl != NULL) {
		hdl_next = list_next(&hca->ih_hdl_list, hdl);
		list_remove(&hca->ih_hdl_list, hdl);
		cmn_err(CE_NOTE, "hca_fini, unexpected ibdma user handle"
		    " exists");
		kmem_free(hdl, sizeof (*hdl));
		hdl = hdl_next;
	}
	list_destroy(&hca->ih_hdl_list);

	/*
	 * Un-register with the IBMF.
	 */
	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
		port = &hca->ih_port[ndx];
		port->ip_hcap = NULL;

		status = ibt_modify_port_byguid(hca->ih_iou_guid,
		    ndx + 1, IBT_PORT_RESET_DEVMGT, 0);
		if (status != IBT_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBT modify port caps"
			    " error (%d)", status);

		if (port->ip_ibmf_hdl == NULL)
			continue;

		status = ibmf_tear_down_async_cb(port->ip_ibmf_hdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF tear down cb"
			    " error (%d)", status);

		status = ibmf_unregister(&port->ip_ibmf_hdl, 0);
		if (status != IBMF_SUCCESS)
			cmn_err(CE_NOTE, "hca_fini, IBMF un-register"
			    " error (%d)", status);

		port->ip_ibmf_hdl = NULL;
	}

	status = ibt_close_hca(hca->ih_ibt_hdl);
	if (status != IBT_SUCCESS)
		cmn_err(CE_NOTE, "hca_fini close error (%d)", status);

	rw_exit(&hca->ih_iou_rwlock);
	rw_destroy(&hca->ih_iou_rwlock);
	kmem_free(hca, sizeof (ibdma_hca_t) +
	    (hca->ih_nports - 1) * sizeof (ibdma_port_t));
}
/* DM IBMF MAD handlers */

/*
 * ibdma_create_resp_mad()
 */
static void
ibdma_create_resp_mad(ibmf_msg_t *msgp)
{
	/*
	 * Allocate the send buffer and fix up the header for the response.
	 */
	msgp->im_msgbufs_send.im_bufs_mad_hdr =
	    kmem_zalloc(IBDMA_MAD_SIZE, KM_SLEEP);

	msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
	    msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
	msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDMA_DM_MAD_HDR_SIZE;
	msgp->im_msgbufs_send.im_bufs_cl_data =
	    ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr +
	    IBDMA_DM_MAD_HDR_SIZE);
	msgp->im_msgbufs_send.im_bufs_cl_data_len =
	    IBDMA_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDMA_DM_MAD_HDR_SIZE;
	(void) memcpy(msgp->im_msgbufs_send.im_bufs_mad_hdr,
	    msgp->im_msgbufs_recv.im_bufs_mad_hdr, IBDMA_MAD_SIZE);
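
	/*
	 * Resulting send-buffer layout (derived from the offsets and
	 * lengths set above; descriptive only, not new behavior):
	 *
	 *   im_bufs_mad_hdr -> base MAD header, sizeof (ib_mad_hdr_t) bytes
	 *   im_bufs_cl_hdr  -> DM class header, IBDMA_DM_MAD_HDR_SIZE bytes
	 *   im_bufs_cl_data -> class payload, the remainder of the
	 *                      IBDMA_MAD_SIZE allocation
	 */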
	/*
	 * We may want to support a GRH since this is a GMP; not
	 * required for current SRP device manager platforms.
	 */
	if (msgp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
		ib_gid_t	temp = msgp->im_global_addr.ig_recver_gid;

		/*
		 * Swap the GIDs so the response is routed back to the
		 * original sender.
		 */
		msgp->im_global_addr.ig_recver_gid =
		    msgp->im_global_addr.ig_sender_gid;
		msgp->im_global_addr.ig_sender_gid = temp;
	}
}
/*
 * ibdma_mad_send_cb()
 */
static void
ibdma_mad_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *arg)
{
	/*
	 * Just free the buffers and release the message.
	 */
	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) {
		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr,
		    IBDMA_MAD_SIZE);
		msgp->im_msgbufs_send.im_bufs_mad_hdr = NULL;
	}
	if (ibmf_free_msg(ibmf_hdl, &msgp) != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_send_cb, IBMF message free error");
	}
}
/*
 * ibdma_mad_recv_cb()
 */
static void
ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *args)
{
	int		status;
	ib_mad_hdr_t	*in_mad;
	ib_mad_hdr_t	*out_mad;
	ibdma_port_t	*port = args;

	ASSERT(msgp != NULL);
	ASSERT(port != NULL);

	if (msgp->im_msg_status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, bad MAD receive status (%d)",
		    msgp->im_msg_status);
		goto drop;
	}

	in_mad = msgp->im_msgbufs_recv.im_bufs_mad_hdr;

	if (in_mad->MgmtClass != MAD_MGMT_CLASS_DEV_MGT) {
		cmn_err(CE_NOTE, "mad_recv_cb, MAD not of Dev Mgmt Class");
		goto drop;
	}

	ibdma_create_resp_mad(msgp);
	out_mad = msgp->im_msgbufs_send.im_bufs_mad_hdr;

	out_mad->R_Method = IB_DM_DEVMGT_METHOD_GET_RESP;

	if (in_mad->R_Method == MAD_METHOD_SET) {
		cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
		    " for set method");
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		goto send_resp;
	}

	if (in_mad->R_Method != MAD_METHOD_GET) {
		cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
		    " for method");
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD;
		goto send_resp;
	}

	/*
	 * Process a GET method.
	 */
	switch (b2h16(in_mad->AttributeID)) {

	case IB_DM_ATTR_CLASSPORTINFO:
		ibdma_get_class_portinfo(msgp);
		break;

	case IB_DM_ATTR_IO_UNITINFO:
		ibdma_get_io_unitinfo(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_IOC_CTRL_PROFILE:
		ibdma_get_ioc_profile(port->ip_hcap, msgp);
		break;

	case IB_DM_ATTR_SERVICE_ENTRIES:
		ibdma_get_ioc_services(port->ip_hcap, msgp);
		break;

	default:
		out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
		break;
	}

send_resp:
	status = ibmf_msg_transport(ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
	    msgp, NULL, ibdma_mad_send_cb, NULL, 0);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, send error (%d)", status);
		ibdma_mad_send_cb(ibmf_hdl, msgp, NULL);
	}
	return;

drop:
	status = ibmf_free_msg(ibmf_hdl, &msgp);
	if (status != IBMF_SUCCESS) {
		cmn_err(CE_NOTE, "mad_recv_cb, error dropping (%d)",
		    status);
	}
}
/*
 * ibdma_get_class_portinfo()
 */
static void
ibdma_get_class_portinfo(ibmf_msg_t *msg)
{
	ib_mad_classportinfo_t	*cpip;

	cpip = (ib_mad_classportinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	bzero(cpip, sizeof (*cpip));
	cpip->BaseVersion = MAD_CLASS_BASE_VERS_1;
	cpip->ClassVersion = IB_DM_CLASS_VERSION_1;
	cpip->RespTimeValue = h2b32(IBDMA_DM_RESP_TIME);
}
/*
 * ibdma_get_io_unitinfo()
 */
static void
ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_io_unitinfo_t	*uip;

	uip = (ib_dm_io_unitinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	bcopy(&hca->ih_iou, uip, sizeof (ib_dm_io_unitinfo_t));
	rw_exit(&hca->ih_iou_rwlock);
}
/*
 * ibdma_get_ioc_profile()
 */
static void
ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_ioc_ctrl_profile_t	*iocp;
	uint32_t			slot;

	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
	iocp = (ib_dm_ioc_ctrl_profile_t *)
	    msg->im_msgbufs_send.im_bufs_cl_data;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;
	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) == IBDMA_IOC_PRESENT) {
		bcopy(&hca->ih_ioc[slot].ii_profile, iocp,
		    sizeof (ib_dm_ioc_ctrl_profile_t));
	} else {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
	}
	rw_exit(&hca->ih_iou_rwlock);
}
/*
 * ibdma_get_ioc_services()
 */
static void
ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg)
{
	ib_dm_srv_t	*to_svcp;
	ib_dm_srv_t	*from_svcp;
	uint32_t	slot;
	uint8_t		hi;
	uint8_t		low;

	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
	hi = (slot >> 8) & 0x00FF;
	low = slot & 0x00FF;
	slot = (slot >> 16) & 0x0FFFF;
	if (slot == 0 || slot > IBDMA_MAX_IOC) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		return;
	}

	slot--;

	rw_enter(&hca->ih_iou_rwlock, RW_READER);
	if (ibdma_get_ioc_state(hca, slot) != IBDMA_IOC_PRESENT) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    IB_DM_DEVMGT_MAD_STAT_NORESP;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	if ((low > hi) || (hi - low > 4)) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	if (hi > hca->ih_ioc[slot].ii_profile.ioc_service_entries) {
		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
		    MAD_STATUS_INVALID_FIELD;
		rw_exit(&hca->ih_iou_rwlock);
		return;
	}

	to_svcp = (ib_dm_srv_t *)msg->im_msgbufs_send.im_bufs_cl_data;
	from_svcp = hca->ih_ioc[slot].ii_srvcs + low;
	bcopy(from_svcp, to_svcp, sizeof (ib_dm_srv_t) * (hi - low + 1));
	rw_exit(&hca->ih_iou_rwlock);
}
/*
 * Client API internal helpers
 */

/*
 * ibdma_get_hdl_impl()
 */
static ibdma_hdl_impl_t *
ibdma_get_hdl_impl(ibdma_hdl_t hdl)
{
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));

	if (hdl_tmp == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, NULL handle");
		return (NULL);
	}

	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "get_hdl_impl, invalid handle, bad IOU");
		return (NULL);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}
	return (hdl_impl);
}
/*
 * ibdma_set_ioc_state()
 *
 * slot should be 0 based (not DM 1 based slot).
 *
 * I/O Unit write lock should be held outside of this function.
 */
static void
ibdma_set_ioc_state(ibdma_hca_t *hca, int slot, ibdma_ioc_state_t state)
{
	uint8_t		cur;
	uint16_t	id;

	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	if (slot & 1) {
		cur = (cur & 0xF0) | state;
	} else {
		cur = (cur & 0x0F) | (state << 4);
	}
	hca->ih_iou.iou_ctrl_list[slot >> 1] = cur;
	id = b2h16(hca->ih_iou.iou_changeid);
	id++;
	hca->ih_iou.iou_changeid = h2b16(id);
	cmn_err(CE_NOTE, "set_ioc_state, slot offset(%d), value(%d)",
	    slot, hca->ih_iou.iou_ctrl_list[slot >> 1]);
}
/*
 * ibdma_get_ioc_state()
 *
 * slot should be 0 based (not DM 1 based slot).
 *
 * I/O Unit read lock should be held outside of this function.
 */
static ibdma_ioc_state_t
ibdma_get_ioc_state(ibdma_hca_t *hca, int slot)
{
	uint8_t		cur;

	if (slot >= IBDMA_MAX_IOC)
		return (IBDMA_IOC_NOT_INSTALLED);

	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	cur = slot & 1 ? cur & 0x0F : cur >> 4;
	return (cur);
}
/* CLIENT API Implementation */

/*
 * ibdma_ioc_register()
 */
ibdma_hdl_t
ibdma_ioc_register(ib_guid_t iou_guid, ib_dm_ioc_ctrl_profile_t *profile,
    ib_dm_srv_t *services)
{
	int			free_slot = -1;
	int			svc_entries;
	int			slot;
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl;

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_register, bad parameter");
		return (NULL);
	}

	svc_entries = profile->ioc_service_entries;
	if (svc_entries == 0) {
		cmn_err(CE_NOTE, "ioc_register, bad profile no service");
		return (NULL);
	}

	/*
	 * Find the associated I/O Unit.
	 */
	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(iou_guid);
	if (hca == NULL) {
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_register, bad I/O Unit GUID (0x%llx)",
		    (u_longlong_t)iou_guid);
		return (NULL);
	}

	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
	for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
		if (hca->ih_ioc[slot].ii_inuse == 0) {
			if (free_slot == -1) {
				free_slot = slot;
			}
			continue;
		}

		if (profile->ioc_guid ==
		    hca->ih_ioc[slot].ii_profile.ioc_guid) {
			rw_exit(&hca->ih_iou_rwlock);
			mutex_exit(&ibdma->ms_hca_list_lock);
			cmn_err(CE_NOTE, "ioc_register, IOC previously"
			    " registered");
			return (NULL);
		}
	}

	if (free_slot < 0) {
		rw_exit(&hca->ih_iou_rwlock);
		cmn_err(CE_NOTE, "ioc_register, error - I/O Unit full");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (NULL);
	}

	cmn_err(CE_NOTE, "ibdma_ioc_register, assigned to 0 based slot (%d)",
	    free_slot);

	hca->ih_ioc[free_slot].ii_inuse = 1;
	hca->ih_ioc[free_slot].ii_slot = free_slot;
	hca->ih_ioc[free_slot].ii_hcap = hca;

	/*
	 * Allocate local copy of profile and services.
	 */
	hca->ih_ioc[free_slot].ii_srvcs =
	    kmem_zalloc(sizeof (ib_dm_srv_t) * svc_entries, KM_SLEEP);
	bcopy(profile, &hca->ih_ioc[free_slot].ii_profile,
	    sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, hca->ih_ioc[free_slot].ii_srvcs,
	    sizeof (ib_dm_srv_t) * svc_entries);

	/*
	 * Update the profile copy with the I/O controller slot assigned.
	 * The slot occupies the lower 8 bits of the vendor ID/slot 32-bit
	 * field.
	 */
	profile->ioc_vendorid |= h2b32(free_slot);
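
	/*
	 * Illustration of the packing above (assuming the caller placed
	 * its 24-bit vendor OUI in the upper 24 bits): for OUI 0x00A0B1
	 * and slot 3, the big-endian field reads 0x00A0B103 on the wire.
	 */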
	ibdma_set_ioc_state(hca, free_slot, IBDMA_IOC_PRESENT);

	hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
	hdl->ih_iou_guid = hca->ih_iou_guid;
	hdl->ih_ioc_ndx = (uint8_t)free_slot;
	list_insert_tail(&hca->ih_hdl_list, hdl);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return ((ibdma_hdl_t)hdl);
}
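
/*
 * Illustrative registration call (a sketch only; tgt_guid, profile, and
 * svcs are hypothetical caller state, not part of this file):
 *
 *	ibdma_hdl_t hdl;
 *
 *	hdl = ibdma_ioc_register(tgt_guid, &profile, svcs);
 *	if (hdl == NULL)
 *		return (DDI_FAILURE);
 *
 * The handle is later passed to ibdma_ioc_update() or
 * ibdma_ioc_unregister().
 */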
/*
 * ibdma_ioc_unregister()
 */
ibdma_status_t
ibdma_ioc_unregister(ibdma_hdl_t hdl)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	int			slot;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, IOU"
		    " not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_unregister, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	list_remove(&hca->ih_hdl_list, hdl_impl);

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, corrupted handle");
		kmem_free(hdl_impl, sizeof (*hdl_impl));
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];
	kmem_free(hdl_impl, sizeof (*hdl_impl));

	if (ioc->ii_slot > IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_unregister, IOC corrupted, bad"
		    " slot in IOC");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_unregister, slot not in use (%d)",
		    ioc->ii_slot);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	slot = ioc->ii_slot;
	hca = ioc->ii_hcap;
	kmem_free(ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    ioc->ii_profile.ioc_service_entries);
	bzero(ioc, sizeof (ibdma_ioc_t));
	ibdma_set_ioc_state(hca, slot, IBDMA_IOC_NOT_INSTALLED);

	rw_exit(&hca->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}
/*
 * ibdma_ioc_update()
 */
ibdma_status_t
ibdma_ioc_update(ibdma_hdl_t hdl, ib_dm_ioc_ctrl_profile_t *profile,
    ib_dm_srv_t *services)
{
	ibdma_ioc_t		*ioc;
	ibdma_hca_t		*hca;
	ibdma_hdl_impl_t	*hdl_tmp = hdl;
	ibdma_hdl_impl_t	*hdl_impl;

	if (hdl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL handle");
		return (IBDMA_BAD_PARAM);
	}

	if (profile == NULL || services == NULL) {
		cmn_err(CE_NOTE, "ioc_update, NULL parameter");
		return (IBDMA_BAD_PARAM);
	}

	mutex_enter(&ibdma->ms_hca_list_lock);
	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
	if (hca == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, IOU not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	hdl_impl = list_head(&hca->ih_hdl_list);
	while (hdl_impl != NULL) {
		if (hdl_impl == hdl_tmp) {
			break;
		}
		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
	}

	if (hdl_impl == NULL) {
		cmn_err(CE_NOTE, "ioc_update, invalid handle, not found");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
		cmn_err(CE_NOTE, "ioc_update, corrupted handle");
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}
	ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];

	if (ioc->ii_slot >= IBDMA_MAX_IOC || ioc->ii_hcap == NULL) {
		cmn_err(CE_NOTE, "ioc_update, bad handle (%p)",
		    (void *)hdl);
		mutex_exit(&ibdma->ms_hca_list_lock);
		return (IBDMA_BAD_PARAM);
	}

	rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
	if (ioc->ii_inuse == 0) {
		rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
		mutex_exit(&ibdma->ms_hca_list_lock);
		cmn_err(CE_NOTE, "ioc_update, slot not in use (%d)",
		    ioc->ii_slot);
		return (IBDMA_BAD_PARAM);
	}

	ASSERT(ioc->ii_srvcs != NULL);

	kmem_free(ioc->ii_srvcs, ioc->ii_profile.ioc_service_entries *
	    sizeof (ib_dm_srv_t));
	ioc->ii_srvcs = kmem_zalloc(profile->ioc_service_entries *
	    sizeof (ib_dm_srv_t), KM_SLEEP);

	bcopy(profile, &ioc->ii_profile, sizeof (ib_dm_ioc_ctrl_profile_t));
	bcopy(services, ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
	    profile->ioc_service_entries);

	/*
	 * Update the profile copy with the I/O controller slot assigned.
	 * The slot occupies the lower 8 bits of the vendor ID/slot 32-bit
	 * field.
	 */
	profile->ioc_vendorid |= h2b32(ioc->ii_slot);
	ibdma_set_ioc_state(ioc->ii_hcap, ioc->ii_slot, IBDMA_IOC_PRESENT);
	rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
	mutex_exit(&ibdma->ms_hca_list_lock);

	return (IBDMA_SUCCESS);
}
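
/*
 * Illustrative update call (a sketch only; hdl, profile, and svcs are
 * the hypothetical caller state from the registration example above):
 *
 *	profile.ioc_service_entries = new_cnt;
 *	if (ibdma_ioc_update(hdl, &profile, svcs) != IBDMA_SUCCESS)
 *		cmn_err(CE_NOTE, "IOC update failed");
 */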