4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
34 #include <sys/types.h>
38 #include <sys/sysmacros.h>
48 #include <sys/rsm/rsmndi.h>
49 #include <rsmlib_in.h>
50 #include <sys/rsm/rsm.h>
54 extern rsm_node_id_t rsm_local_nodeid
;
55 extern int loopback_getv(rsm_scat_gath_t
*);
56 extern int loopback_putv(rsm_scat_gath_t
*);
58 static rsm_ndlib_attr_t _rsm_genlib_attr
= {
59 B_TRUE
, /* mapping needed for put/get */
60 B_FALSE
/* mapping needed for putv/getv */
65 rsmapi_controller_handle_t controller
, rsm_node_id_t node_id
,
66 rsm_memseg_id_t segment_id
, rsm_permission_t perm
,
67 rsm_memseg_import_handle_t
*im_memseg
) {
69 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
70 "__rsm_import_connect: enter\n"));
72 controller
= controller
;
74 segment_id
= segment_id
;
76 im_memseg
= im_memseg
;
78 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
79 "__rsm_import_connect: exit\n"));
85 __rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg
) {
87 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
88 "__rsm_import_disconnect: enter\n"));
90 im_memseg
= im_memseg
;
92 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
93 "__rsm_import_disconnect: exit\n"));
/*
 * XXX: one day we ought to rewrite this stuff based on 64-byte atomic
 * access.  We can have a new ops vector that makes that assumption.
 */
104 __rsm_get8x8(rsm_memseg_import_handle_t im_memseg
, off_t off
,
109 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
111 (uint8_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
117 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
118 "__rsm_import_get8x8: enter\n"));
120 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
121 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
122 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
123 if (e
!= RSM_SUCCESS
) {
128 for (i
= 0; i
< rep_cnt
; i
++) {
129 datap
[i
] = data_addr
[i
];
132 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
133 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
134 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
135 if (e
!= RSM_SUCCESS
) {
140 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
141 "__rsm_import_get8x8: exit\n"));
143 return (RSM_SUCCESS
);
147 __rsm_get16x16(rsm_memseg_import_handle_t im_memseg
, off_t off
,
152 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
153 uint16_t *data_addr
=
155 (uint16_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
161 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
162 "__rsm_import_get16x16: enter\n"));
164 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
165 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
166 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
167 if (e
!= RSM_SUCCESS
) {
172 for (i
= 0; i
< rep_cnt
; i
++) {
173 datap
[i
] = data_addr
[i
];
176 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
177 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
178 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
179 if (e
!= RSM_SUCCESS
) {
184 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
185 "__rsm_import_get16x16: exit\n"));
187 return (RSM_SUCCESS
);
191 __rsm_get32x32(rsm_memseg_import_handle_t im_memseg
, off_t off
,
196 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
197 uint32_t *data_addr
=
199 (uint32_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
205 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
206 "__rsm_import_get32x32: enter\n"));
208 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
209 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
210 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
211 if (e
!= RSM_SUCCESS
) {
216 for (i
= 0; i
< rep_cnt
; i
++) {
217 datap
[i
] = data_addr
[i
];
220 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
221 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
222 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
223 if (e
!= RSM_SUCCESS
) {
228 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
229 "__rsm_import_get32x32: exit\n"));
231 return (RSM_SUCCESS
);
235 __rsm_get64x64(rsm_memseg_import_handle_t im_memseg
, off_t off
,
240 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
241 uint64_t *data_addr
=
243 (uint64_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
249 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
250 "__rsm_import_get64x64: enter\n"));
252 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
253 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
254 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
255 if (e
!= RSM_SUCCESS
) {
260 for (i
= 0; i
< rep_cnt
; i
++) {
261 datap
[i
] = data_addr
[i
];
264 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
265 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
266 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
267 if (e
!= RSM_SUCCESS
) {
272 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
273 "__rsm_import_get64x64: exit\n"));
275 return (RSM_SUCCESS
);
/*
 * import side memory segment operations (write access functions):
 */

/*
 * XXX: Each one of the following cases ought to be a separate function
 * loaded into a segment access ops vector.  We determine the correct
 * function at segment connect time.  When a new controller is registered,
 * we can decode its direct_access_size attribute and load the correct
 * function.  For loopback we need to create a special ops vector that
 * bypasses all of this stuff.
 *
 * XXX: We need to create a special interrupt queue for the library to
 * handle partial writes in the remote process.
 */
294 __rsm_put8x8(rsm_memseg_import_handle_t im_memseg
, off_t off
,
299 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
301 (uint8_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
307 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
308 "__rsm_put8x8: enter\n"));
310 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
311 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
312 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
313 if (e
!= RSM_SUCCESS
) {
318 for (i
= 0; i
< rep_cnt
; i
++) {
319 data_addr
[i
] = datap
[i
];
322 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
323 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
324 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
325 if (e
!= RSM_SUCCESS
) {
330 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
331 "__rsm_put8x8: exit\n"));
333 return (RSM_SUCCESS
);
337 __rsm_put16x16(rsm_memseg_import_handle_t im_memseg
, off_t off
,
342 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
343 uint16_t *data_addr
=
345 (uint16_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
351 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
352 "__rsm_put16x16: enter\n"));
354 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
355 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
356 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
357 if (e
!= RSM_SUCCESS
) {
362 for (i
= 0; i
< rep_cnt
; i
++) {
363 data_addr
[i
] = datap
[i
];
366 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
367 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
368 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
369 if (e
!= RSM_SUCCESS
) {
374 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
375 "__rsm_put16x16: exit\n"));
377 return (RSM_SUCCESS
);
381 __rsm_put32x32(rsm_memseg_import_handle_t im_memseg
, off_t off
,
386 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
387 uint32_t *data_addr
=
389 (uint32_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
395 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
396 "__rsm_put32x32: enter\n"));
398 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
399 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
400 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
401 if (e
!= RSM_SUCCESS
) {
406 for (i
= 0; i
< rep_cnt
; i
++) {
407 data_addr
[i
] = datap
[i
];
410 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
411 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
412 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
413 if (e
!= RSM_SUCCESS
) {
418 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
419 "__rsm_put32x32: exit\n"));
421 return (RSM_SUCCESS
);
425 __rsm_put64x64(rsm_memseg_import_handle_t im_memseg
, off_t off
,
430 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
431 uint64_t *data_addr
=
433 (uint64_t *)&seg
->rsmseg_vaddr
[off
- seg
->rsmseg_mapoffset
];
439 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
440 "__rsm_put64x64: enter\n"));
442 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
443 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
444 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
445 if (e
!= RSM_SUCCESS
) {
450 for (i
= 0; i
< rep_cnt
; i
++) {
451 data_addr
[i
] = datap
[i
];
454 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
455 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
456 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
457 if (e
!= RSM_SUCCESS
) {
462 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
463 "__rsm_put64x64: exit\n"));
465 return (RSM_SUCCESS
);
469 __rsm_get(rsm_memseg_import_handle_t im_memseg
, off_t offset
, void *dst_addr
,
472 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
475 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
476 "__rsm_get: enter\n"));
478 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
479 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
480 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
481 if (e
!= RSM_SUCCESS
) {
486 (void) bcopy(seg
->rsmseg_vaddr
+ offset
- seg
->rsmseg_mapoffset
,
489 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
490 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
491 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
492 if (e
!= RSM_SUCCESS
) {
497 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
498 "__rsm_get: exit\n"));
500 return (RSM_SUCCESS
);
504 __rsm_getv(rsm_scat_gath_t
*sg_io
)
506 rsm_iovec_t
*iovec
= sg_io
->iovec
;
507 rsmka_iovec_t ka_iovec_arr
[RSM_MAX_IOVLEN
];
508 rsmka_iovec_t
*ka_iovec
, *ka_iovec_start
;
509 rsmka_iovec_t l_iovec_arr
[RSM_MAX_IOVLEN
];
510 rsmka_iovec_t
*l_iovec
, *l_iovec_start
;
511 rsmseg_handle_t
*im_seg_hndl
= (rsmseg_handle_t
*)sg_io
->remote_handle
;
512 rsmseg_handle_t
*seg_hndl
;
513 int iovec_size
= sizeof (rsmka_iovec_t
) * sg_io
->io_request_count
;
516 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
517 "__rsm_getv: enter\n"));
520 * Use loopback for single node operations.
521 * replace local handles with virtual addresses
524 if (im_seg_hndl
->rsmseg_nodeid
== rsm_local_nodeid
) {
526 * To use the loopback optimization map the segment
529 if (im_seg_hndl
->rsmseg_state
== IMPORT_CONNECT
) {
531 va
= mmap(NULL
, im_seg_hndl
->rsmseg_size
,
532 PROT_READ
|PROT_WRITE
,
533 MAP_SHARED
|MAP_NORESERVE
,
534 im_seg_hndl
->rsmseg_fd
, 0);
536 if (va
== MAP_FAILED
) {
537 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
538 "implicit map failed:%d\n", errno
));
540 return (RSMERR_BAD_MEM_ALIGNMENT
);
541 else if (errno
== ENOMEM
|| errno
== ENXIO
||
543 return (RSMERR_BAD_LENGTH
);
544 else if (errno
== EAGAIN
)
545 return (RSMERR_INSUFFICIENT_RESOURCES
);
550 im_seg_hndl
->rsmseg_vaddr
= va
;
551 im_seg_hndl
->rsmseg_maplen
= im_seg_hndl
->rsmseg_size
;
552 im_seg_hndl
->rsmseg_mapoffset
= 0;
553 im_seg_hndl
->rsmseg_state
= IMPORT_MAP
;
554 im_seg_hndl
->rsmseg_flags
|= RSM_IMPLICIT_MAP
;
557 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
558 l_iovec_start
= l_iovec
= malloc(iovec_size
);
560 l_iovec_start
= l_iovec
= l_iovec_arr
;
562 bcopy((caddr_t
)iovec
, (caddr_t
)l_iovec
, iovec_size
);
563 for (i
= 0; i
< sg_io
->io_request_count
; i
++) {
564 if (l_iovec
->io_type
== RSM_HANDLE_TYPE
) {
565 /* Get the surrogate export segment handle */
566 seg_hndl
= (rsmseg_handle_t
*)
567 l_iovec
->local
.handle
;
568 l_iovec
->local
.vaddr
= seg_hndl
->rsmseg_vaddr
;
569 l_iovec
->io_type
= RSM_VA_TYPE
;
573 sg_io
->iovec
= (rsm_iovec_t
*)l_iovec_start
;
574 e
= loopback_getv(sg_io
);
575 sg_io
->iovec
= iovec
;
576 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
578 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
579 "__rsm_getv: exit\n"));
583 /* for the Kernel Agent, replace local handles with segment ids */
584 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
585 ka_iovec_start
= ka_iovec
= malloc(iovec_size
);
587 ka_iovec_start
= ka_iovec
= ka_iovec_arr
;
589 bcopy((caddr_t
)iovec
, (caddr_t
)ka_iovec
, iovec_size
);
590 for (i
= 0; i
< sg_io
->io_request_count
; i
++) {
591 if (ka_iovec
->io_type
== RSM_HANDLE_TYPE
) {
592 seg_hndl
= (rsmseg_handle_t
*)ka_iovec
->local
.handle
;
593 ka_iovec
->local
.segid
= seg_hndl
->rsmseg_keyid
;
598 sg_io
->iovec
= (rsm_iovec_t
*)ka_iovec_start
;
599 e
= ioctl(im_seg_hndl
->rsmseg_fd
, RSM_IOCTL_GETV
, sg_io
);
600 sg_io
->iovec
= iovec
;
602 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
603 free(ka_iovec_start
);
606 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
607 " RSM_IOCTL_GETV failed\n"));
611 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
612 "__rsm_getv: exit\n"));
614 return (RSM_SUCCESS
);
619 __rsm_put(rsm_memseg_import_handle_t im_memseg
, off_t offset
, void *src_addr
,
622 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
625 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
626 "__rsm_put: enter\n"));
628 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
629 e
= seg
->rsmseg_ops
->rsm_memseg_import_open_barrier(
630 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
631 if (e
!= RSM_SUCCESS
) {
636 bcopy(src_addr
, seg
->rsmseg_vaddr
+ offset
- seg
->rsmseg_mapoffset
,
639 if (seg
->rsmseg_barmode
== RSM_BARRIER_MODE_IMPLICIT
) {
640 e
= seg
->rsmseg_ops
->rsm_memseg_import_close_barrier(
641 (rsm_barrier_handle_t
)seg
->rsmseg_barrier
);
642 if (e
!= RSM_SUCCESS
) {
647 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
648 "__rsm_put: exit\n"));
650 return (RSM_SUCCESS
);
654 __rsm_putv(rsm_scat_gath_t
*sg_io
)
656 rsm_iovec_t
*iovec
= sg_io
->iovec
;
657 rsmka_iovec_t ka_iovec_arr
[RSM_MAX_IOVLEN
];
658 rsmka_iovec_t
*ka_iovec
, *ka_iovec_start
;
659 rsmka_iovec_t l_iovec_arr
[RSM_MAX_IOVLEN
];
660 rsmka_iovec_t
*l_iovec
, *l_iovec_start
;
661 rsmseg_handle_t
*im_seg_hndl
= (rsmseg_handle_t
*)sg_io
->remote_handle
;
662 rsmseg_handle_t
*seg_hndl
;
663 int iovec_size
= sizeof (rsmka_iovec_t
) * sg_io
->io_request_count
;
666 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
667 "__rsm_putv: enter\n"));
670 * Use loopback for single node operations.
671 * replace local handles with virtual addresses
674 if (im_seg_hndl
->rsmseg_nodeid
== rsm_local_nodeid
) {
676 * To use the loopback optimization map the segment
679 if (im_seg_hndl
->rsmseg_state
== IMPORT_CONNECT
) {
681 va
= mmap(NULL
, im_seg_hndl
->rsmseg_size
,
682 PROT_READ
|PROT_WRITE
,
683 MAP_SHARED
|MAP_NORESERVE
,
684 im_seg_hndl
->rsmseg_fd
, 0);
686 if (va
== MAP_FAILED
) {
687 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
688 "implicit map failed:%d\n", errno
));
690 return (RSMERR_BAD_MEM_ALIGNMENT
);
691 else if (errno
== ENOMEM
|| errno
== ENXIO
||
693 return (RSMERR_BAD_LENGTH
);
694 else if (errno
== EAGAIN
)
695 return (RSMERR_INSUFFICIENT_RESOURCES
);
699 im_seg_hndl
->rsmseg_vaddr
= va
;
700 im_seg_hndl
->rsmseg_maplen
= im_seg_hndl
->rsmseg_size
;
701 im_seg_hndl
->rsmseg_mapoffset
= 0;
702 im_seg_hndl
->rsmseg_state
= IMPORT_MAP
;
703 im_seg_hndl
->rsmseg_flags
|= RSM_IMPLICIT_MAP
;
706 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
707 l_iovec_start
= l_iovec
= malloc(iovec_size
);
709 l_iovec_start
= l_iovec
= l_iovec_arr
;
711 bcopy((caddr_t
)iovec
, (caddr_t
)l_iovec
, iovec_size
);
712 for (i
= 0; i
< sg_io
->io_request_count
; i
++) {
713 if (l_iovec
->io_type
== RSM_HANDLE_TYPE
) {
714 /* Get the surrogate export segment handle */
715 seg_hndl
= (rsmseg_handle_t
*)
716 l_iovec
->local
.handle
;
717 l_iovec
->local
.vaddr
= seg_hndl
->rsmseg_vaddr
;
718 l_iovec
->io_type
= RSM_VA_TYPE
;
722 sg_io
->iovec
= (rsm_iovec_t
*)l_iovec_start
;
723 e
= loopback_putv(sg_io
);
724 sg_io
->iovec
= iovec
;
726 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
729 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
730 "__rsm_putv: exit\n"));
736 /* for the Kernel Agent, replace local handles with segment ids */
737 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
738 ka_iovec_start
= ka_iovec
= malloc(iovec_size
);
740 ka_iovec_start
= ka_iovec
= ka_iovec_arr
;
742 bcopy((caddr_t
)iovec
, (caddr_t
)ka_iovec
, iovec_size
);
744 for (i
= 0; i
< sg_io
->io_request_count
; i
++) {
745 if (ka_iovec
->io_type
== RSM_HANDLE_TYPE
) {
746 seg_hndl
= (rsmseg_handle_t
*)ka_iovec
->local
.handle
;
747 ka_iovec
->local
.segid
= seg_hndl
->rsmseg_keyid
;
752 sg_io
->iovec
= (rsm_iovec_t
*)ka_iovec_start
;
753 e
= ioctl(im_seg_hndl
->rsmseg_fd
, RSM_IOCTL_PUTV
, sg_io
);
754 sg_io
->iovec
= iovec
;
756 if (sg_io
->io_request_count
> RSM_MAX_IOVLEN
)
757 free(ka_iovec_start
);
760 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
761 " RSM_IOCTL_PUTV failed\n"));
765 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
766 "__rsm_putv: exit\n"));
768 return (RSM_SUCCESS
);
/*
 * import side memory segment operations (barriers):
 */
775 __rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg
,
776 rsm_barrier_type_t type
,
777 rsm_barrier_handle_t barrier
)
779 rsmseg_handle_t
*seg
= (rsmseg_handle_t
*)im_memseg
;
780 rsmgenbar_handle_t
*bar
= (rsmgenbar_handle_t
*)barrier
;
782 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
784 "__rsm_memseg_import_init_barrier: enter\n"));
789 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
790 "invalid segment handle\n"));
791 return (RSMERR_BAD_SEG_HNDL
);
794 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
795 "invalid barrier handle\n"));
796 return (RSMERR_BAD_BARRIER_PTR
);
799 /* XXX: fix later. We only support span-of-node barriers */
801 bar
->rsmgenbar_data
= (rsm_barrier_t
*)malloc(sizeof (rsm_barrier_t
));
802 if (bar
->rsmgenbar_data
== NULL
) {
803 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
804 "not enough memory\n"));
805 return (RSMERR_INSUFFICIENT_MEM
);
807 bar
->rsmgenbar_seg
= seg
;
809 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
810 "__rsm_memseg_import_init_barrier: exit\n"));
812 return (RSM_SUCCESS
);
816 __rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier
)
818 rsmgenbar_handle_t
*bar
= (rsmgenbar_handle_t
*)barrier
;
819 rsmseg_handle_t
*seg
;
822 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
823 "__rsm_memseg_import_open_barrier: enter\n"));
826 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
827 "invalid barrier pointer\n"));
828 return (RSMERR_BAD_BARRIER_PTR
);
831 if ((seg
= bar
->rsmgenbar_seg
) == 0) {
832 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
833 "uninitialized barrier\n"));
834 return (RSMERR_BARRIER_UNINITIALIZED
);
837 /* lint -save -e718 -e746 */
838 msg
.bar
= *(bar
->rsmgenbar_data
);
839 if (ioctl(seg
->rsmseg_fd
,
840 RSM_IOCTL_BAR_OPEN
, &msg
) < 0) {
841 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
842 " RSM_IOCTL_BAR_OPEN failed\n"));
844 return (RSMERR_BARRIER_OPEN_FAILED
);
847 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
848 "__rsm_memseg_import_open_barrier: exit\n"));
850 return (RSM_SUCCESS
);
854 __rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier
)
856 rsmgenbar_handle_t
*bar
= (rsmgenbar_handle_t
*)barrier
;
857 rsmseg_handle_t
*seg
;
860 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
861 "__rsm_memseg_import_order_barrier: enter\n"));
864 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
865 "invalid barrier\n"));
866 return (RSMERR_BAD_BARRIER_PTR
);
868 if ((seg
= bar
->rsmgenbar_seg
) == 0) {
869 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
870 "uninitialized barrier\n"));
871 return (RSMERR_BARRIER_UNINITIALIZED
);
874 msg
.bar
= *(bar
->rsmgenbar_data
);
875 if (ioctl(seg
->rsmseg_fd
, RSM_IOCTL_BAR_ORDER
, &msg
) < 0) {
876 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
877 "RSM_IOCTL_BAR_ORDER failed\n"));
878 return (RSMERR_BARRIER_FAILURE
);
881 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
882 "__rsm_memseg_import_order_barrier: exit\n"));
884 return (RSM_SUCCESS
);
888 __rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier
)
890 rsmgenbar_handle_t
*bar
= (rsmgenbar_handle_t
*)barrier
;
891 rsmseg_handle_t
*seg
;
894 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
895 "__rsm_memseg_import_close_barrier: enter\n"));
898 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
899 "invalid barrier\n"));
900 return (RSMERR_BAD_BARRIER_PTR
);
902 if ((seg
= bar
->rsmgenbar_seg
) == 0) {
903 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
904 "uninitialized barrier\n"));
905 return (RSMERR_BARRIER_UNINITIALIZED
);
908 msg
.bar
= *(bar
->rsmgenbar_data
);
909 if (ioctl(seg
->rsmseg_fd
, RSM_IOCTL_BAR_CLOSE
, &msg
) < 0) {
910 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
911 " RSM_IOCTL_BAR_CLOSE failed\n"));
912 return (RSMERR_BARRIER_FAILURE
);
915 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
916 "__rsm_memseg_import_close_barrier: exit\n"));
918 return (RSM_SUCCESS
);
922 __rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier
)
924 rsmgenbar_handle_t
*bar
= (rsmgenbar_handle_t
*)barrier
;
926 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
927 "__rsm_memseg_import_destroy_barrier: enter\n"));
930 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
931 "invalid barrier\n"));
932 return (RSMERR_BAD_BARRIER_PTR
);
935 free((void *) bar
->rsmgenbar_data
);
937 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
938 "__rsm_memseg_import_destroy_barrier: exit\n"));
940 return (RSM_SUCCESS
);
945 __rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg
,
946 rsm_barrier_mode_t
*mode
)
948 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
949 "__rsm_memseg_import_get_mode: enter\n"));
951 im_memseg
= im_memseg
; mode
= mode
;
953 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
954 "__rsm_memseg_import_get_mode: exit\n"));
956 return (RSM_SUCCESS
);
959 __rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg
,
960 rsm_barrier_mode_t mode
)
962 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
963 "__rsm_memseg_import_set_mode: enter\n"));
965 im_memseg
= im_memseg
; mode
= mode
;
967 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
968 "__rsm_memseg_import_set_mode: exit\n"));
970 return (RSM_SUCCESS
);
974 __rsm_create_memory_handle(rsmapi_controller_handle_t controller
,
975 rsm_localmemory_handle_t
*local_hndl_p
,
976 caddr_t local_va
, size_t len
)
978 rsm_memseg_export_handle_t memseg
;
979 rsmapi_access_entry_t acl
[1];
980 rsm_memseg_id_t segid
= 0;
985 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
986 "__rsm_create_memory_handle: enter\n"));
989 * create a surrogate segment (local memory will be locked down).
991 size
= roundup(len
, PAGESIZE
);
992 e
= rsm_memseg_export_create(controller
, &memseg
,
993 (void *)local_va
, size
,
995 if (e
!= RSM_SUCCESS
) {
996 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
997 "export create failed\n"));
1002 * Publish the segment to the local node only. If the segment
1003 * length is very large then don't publish to the adapter driver
1004 * because that will consume too much DVMA space - this is indicated
1005 * to the Kernel Agent using null permissions. DVMA binding will
1006 * be done when the RDMA is set up.
1008 acl
[0].ae_node
= rsm_local_nodeid
;
1009 if (len
> RSM_MAX_HANDLE_DVMA
)
1010 acl
[0].ae_permission
= 0;
1012 acl
[0].ae_permission
= RSM_PERM_RDWR
;
1014 e
= rsm_memseg_export_publish(memseg
, &segid
, acl
, 1);
1015 if (e
!= RSM_SUCCESS
) {
1016 DBPRINTF((RSM_LIBRARY
, RSM_ERR
,
1017 "export publish failed\n"));
1018 rsm_memseg_export_destroy(memseg
);
1022 /* Use the surrogate seghandle as the local memory handle */
1023 *local_hndl_p
= (rsm_localmemory_handle_t
)memseg
;
1025 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1026 "__rsm_create_memory_handle: exit\n"));
1032 __rsm_free_memory_handle(rsm_localmemory_handle_t local_handle
)
1034 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1035 "__rsm_free_memory_handle: enter\n"));
1037 rsm_memseg_export_destroy((rsm_memseg_export_handle_t
)local_handle
);
1039 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1040 "__rsm_free_memory_handle: exit\n"));
1042 return (RSM_SUCCESS
);
1046 __rsm_get_lib_attr(rsm_ndlib_attr_t
**libattrp
)
1049 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1050 "__rsm_get_lib_attr: enter\n"));
1052 *libattrp
= &_rsm_genlib_attr
;
1054 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1055 "__rsm_get_lib_attr: exit\n"));
1057 return (RSM_SUCCESS
);
1061 __rsm_closedevice(rsmapi_controller_handle_t cntr_handle
)
1064 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1065 "__rsm_closedevice: enter\n"));
1067 cntr_handle
= cntr_handle
;
1069 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1070 "__rsm_closedevice: exit\n"));
1072 return (RSM_SUCCESS
);
1076 __rsmdefault_setops(rsm_segops_t
*segops
)
1079 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1080 "__rsmdefault_setops: enter\n"));
1082 if (segops
->rsm_memseg_import_connect
== NULL
) {
1083 segops
->rsm_memseg_import_connect
= __rsm_import_connect
;
1085 if (segops
->rsm_memseg_import_disconnect
== NULL
) {
1086 segops
->rsm_memseg_import_disconnect
= __rsm_import_disconnect
;
1089 if (segops
->rsm_memseg_import_get8
== NULL
) {
1090 segops
->rsm_memseg_import_get8
= __rsm_get8x8
;
1092 if (segops
->rsm_memseg_import_get16
== NULL
) {
1093 segops
->rsm_memseg_import_get16
= __rsm_get16x16
;
1095 if (segops
->rsm_memseg_import_get32
== NULL
) {
1096 segops
->rsm_memseg_import_get32
= __rsm_get32x32
;
1098 if (segops
->rsm_memseg_import_get64
== NULL
) {
1099 segops
->rsm_memseg_import_get64
= __rsm_get64x64
;
1101 if (segops
->rsm_memseg_import_get
== NULL
) {
1102 segops
->rsm_memseg_import_get
= __rsm_get
;
1105 if (segops
->rsm_memseg_import_put8
== NULL
) {
1106 segops
->rsm_memseg_import_put8
= __rsm_put8x8
;
1108 if (segops
->rsm_memseg_import_put16
== NULL
) {
1109 segops
->rsm_memseg_import_put16
= __rsm_put16x16
;
1111 if (segops
->rsm_memseg_import_put32
== NULL
) {
1112 segops
->rsm_memseg_import_put32
= __rsm_put32x32
;
1114 if (segops
->rsm_memseg_import_put64
== NULL
) {
1115 segops
->rsm_memseg_import_put64
= __rsm_put64x64
;
1117 if (segops
->rsm_memseg_import_put
== NULL
) {
1118 segops
->rsm_memseg_import_put
= __rsm_put
;
1121 if (segops
->rsm_memseg_import_putv
== NULL
) {
1122 segops
->rsm_memseg_import_putv
= __rsm_putv
;
1125 if (segops
->rsm_memseg_import_getv
== NULL
) {
1126 segops
->rsm_memseg_import_getv
= __rsm_getv
;
1129 if (segops
->rsm_create_localmemory_handle
== NULL
) {
1130 segops
->rsm_create_localmemory_handle
=
1131 __rsm_create_memory_handle
;
1134 if (segops
->rsm_free_localmemory_handle
== NULL
) {
1135 segops
->rsm_free_localmemory_handle
=
1136 __rsm_free_memory_handle
;
1139 /* XXX: Need to support barrier functions */
1140 if (segops
->rsm_memseg_import_init_barrier
== NULL
) {
1141 segops
->rsm_memseg_import_init_barrier
=
1142 __rsm_memseg_import_init_barrier
;
1144 if (segops
->rsm_memseg_import_open_barrier
== NULL
) {
1145 segops
->rsm_memseg_import_open_barrier
=
1146 __rsm_memseg_import_open_barrier
;
1148 if (segops
->rsm_memseg_import_order_barrier
== NULL
) {
1149 segops
->rsm_memseg_import_order_barrier
=
1150 __rsm_memseg_import_order_barrier
;
1152 if (segops
->rsm_memseg_import_close_barrier
== NULL
) {
1153 segops
->rsm_memseg_import_close_barrier
=
1154 __rsm_memseg_import_close_barrier
;
1156 if (segops
->rsm_memseg_import_destroy_barrier
== NULL
) {
1157 segops
->rsm_memseg_import_destroy_barrier
=
1158 __rsm_memseg_import_destroy_barrier
;
1161 if (segops
->rsm_memseg_import_get_mode
== NULL
) {
1162 segops
->rsm_memseg_import_get_mode
=
1163 __rsm_memseg_import_get_mode
;
1165 if (segops
->rsm_memseg_import_set_mode
== NULL
) {
1166 segops
->rsm_memseg_import_set_mode
=
1167 __rsm_memseg_import_set_mode
;
1170 if (segops
->rsm_get_lib_attr
== NULL
) {
1171 segops
->rsm_get_lib_attr
=
1175 if (segops
->rsm_closedevice
== NULL
) {
1176 segops
->rsm_closedevice
=
1181 DBPRINTF((RSM_LIBRARY
, RSM_DEBUG_VERBOSE
,
1182 "__rsmdefault_setops: exit\n"));