8322 nl: misleading-indentation
[unleashed/tickless.git] / usr / src / lib / librsm / common / rsmgen.c
blobeb2f957945c45f7c021e29dbf63c62f76362361b
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <string.h>
33 #include <strings.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <sys/mman.h>
37 #include <sys/uio.h>
38 #include <sys/sysmacros.h>
39 #include <unistd.h>
40 #include <errno.h>
41 #include <assert.h>
42 #include <malloc.h>
43 #include <fcntl.h>
44 #include <dlfcn.h>
45 #include <sched.h>
47 #include <rsmapi.h>
48 #include <sys/rsm/rsmndi.h>
49 #include <rsmlib_in.h>
50 #include <sys/rsm/rsm.h>
52 /* lint -w2 */
54 extern rsm_node_id_t rsm_local_nodeid;
55 extern int loopback_getv(rsm_scat_gath_t *);
56 extern int loopback_putv(rsm_scat_gath_t *);
58 static rsm_ndlib_attr_t _rsm_genlib_attr = {
59 B_TRUE, /* mapping needed for put/get */
60 B_FALSE /* mapping needed for putv/getv */
63 static int
64 __rsm_import_connect(
65 rsmapi_controller_handle_t controller, rsm_node_id_t node_id,
66 rsm_memseg_id_t segment_id, rsm_permission_t perm,
67 rsm_memseg_import_handle_t *im_memseg) {
69 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
70 "__rsm_import_connect: enter\n"));
72 controller = controller;
73 node_id = node_id;
74 segment_id = segment_id;
75 perm = perm;
76 im_memseg = im_memseg;
78 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
79 "__rsm_import_connect: exit\n"));
81 return (RSM_SUCCESS);
84 static int
85 __rsm_import_disconnect(rsm_memseg_import_handle_t im_memseg) {
87 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
88 "__rsm_import_disconnect: enter\n"));
90 im_memseg = im_memseg;
92 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
93 "__rsm_import_disconnect: exit\n"));
95 return (RSM_SUCCESS);
/*
 * XXX: one day we ought to rewrite this stuff based on 64byte atomic access.
 * We can have a new ops vector that makes that assumption.
 */
103 static int
104 __rsm_get8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
105 uint8_t *datap,
106 ulong_t rep_cnt,
107 boolean_t swap)
109 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
110 uint8_t *data_addr =
111 (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
112 uint_t i = 0;
113 int e;
115 swap = swap;
117 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
118 "__rsm_import_get8x8: enter\n"));
120 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
121 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
122 (rsm_barrier_handle_t)seg->rsmseg_barrier);
123 if (e != RSM_SUCCESS) {
124 return (e);
128 for (i = 0; i < rep_cnt; i++) {
129 datap[i] = data_addr[i];
132 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
133 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
134 (rsm_barrier_handle_t)seg->rsmseg_barrier);
135 if (e != RSM_SUCCESS) {
136 return (e);
140 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
141 "__rsm_import_get8x8: exit\n"));
143 return (RSM_SUCCESS);
146 static int
147 __rsm_get16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
148 uint16_t *datap,
149 ulong_t rep_cnt,
150 boolean_t swap)
152 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
153 uint16_t *data_addr =
154 /* LINTED */
155 (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
156 uint_t i = 0;
157 int e;
159 swap = swap;
161 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
162 "__rsm_import_get16x16: enter\n"));
164 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
165 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
166 (rsm_barrier_handle_t)seg->rsmseg_barrier);
167 if (e != RSM_SUCCESS) {
168 return (e);
172 for (i = 0; i < rep_cnt; i++) {
173 datap[i] = data_addr[i];
176 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
177 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
178 (rsm_barrier_handle_t)seg->rsmseg_barrier);
179 if (e != RSM_SUCCESS) {
180 return (e);
184 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
185 "__rsm_import_get16x16: exit\n"));
187 return (RSM_SUCCESS);
190 static int
191 __rsm_get32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
192 uint32_t *datap,
193 ulong_t rep_cnt,
194 boolean_t swap)
196 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
197 uint32_t *data_addr =
198 /* LINTED */
199 (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
200 uint_t i = 0;
201 int e;
203 swap = swap;
205 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
206 "__rsm_import_get32x32: enter\n"));
208 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
209 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
210 (rsm_barrier_handle_t)seg->rsmseg_barrier);
211 if (e != RSM_SUCCESS) {
212 return (e);
216 for (i = 0; i < rep_cnt; i++) {
217 datap[i] = data_addr[i];
220 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
221 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
222 (rsm_barrier_handle_t)seg->rsmseg_barrier);
223 if (e != RSM_SUCCESS) {
224 return (e);
228 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
229 "__rsm_import_get32x32: exit\n"));
231 return (RSM_SUCCESS);
234 static int
235 __rsm_get64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
236 uint64_t *datap,
237 ulong_t rep_cnt,
238 boolean_t swap)
240 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
241 uint64_t *data_addr =
242 /* LINTED */
243 (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
244 uint_t i = 0;
245 int e;
247 swap = swap;
249 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
250 "__rsm_import_get64x64: enter\n"));
252 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
253 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
254 (rsm_barrier_handle_t)seg->rsmseg_barrier);
255 if (e != RSM_SUCCESS) {
256 return (e);
260 for (i = 0; i < rep_cnt; i++) {
261 datap[i] = data_addr[i];
264 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
265 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
266 (rsm_barrier_handle_t)seg->rsmseg_barrier);
267 if (e != RSM_SUCCESS) {
268 return (e);
272 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
273 "__rsm_import_get64x64: exit\n"));
275 return (RSM_SUCCESS);
/*
 * import side memory segment operations (write access functions):
 */

/*
 * XXX: Each one of the following cases ought to be a separate function loaded
 * into a segment access ops vector. We determine the correct function at
 * segment connect time. When a new controller is registered, we can decode
 * its direct_access_size attribute and load the correct function. For
 * loopback we need to create a special ops vector that bypasses all of
 * this stuff.
 *
 * XXX: We need to create a special interrupt queue for the library to handle
 * partial writes in the remote process.
 */
293 static int
294 __rsm_put8x8(rsm_memseg_import_handle_t im_memseg, off_t off,
295 uint8_t *datap,
296 ulong_t rep_cnt,
297 boolean_t swap)
299 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
300 uint8_t *data_addr =
301 (uint8_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
302 uint_t i = 0;
303 int e;
305 swap = swap;
307 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
308 "__rsm_put8x8: enter\n"));
310 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
311 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
312 (rsm_barrier_handle_t)seg->rsmseg_barrier);
313 if (e != RSM_SUCCESS) {
314 return (e);
318 for (i = 0; i < rep_cnt; i++) {
319 data_addr[i] = datap[i];
322 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
323 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
324 (rsm_barrier_handle_t)seg->rsmseg_barrier);
325 if (e != RSM_SUCCESS) {
326 return (e);
330 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
331 "__rsm_put8x8: exit\n"));
333 return (RSM_SUCCESS);
336 static int
337 __rsm_put16x16(rsm_memseg_import_handle_t im_memseg, off_t off,
338 uint16_t *datap,
339 ulong_t rep_cnt,
340 boolean_t swap)
342 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
343 uint16_t *data_addr =
344 /* LINTED */
345 (uint16_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
346 uint_t i = 0;
347 int e;
349 swap = swap;
351 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
352 "__rsm_put16x16: enter\n"));
354 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
355 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
356 (rsm_barrier_handle_t)seg->rsmseg_barrier);
357 if (e != RSM_SUCCESS) {
358 return (e);
362 for (i = 0; i < rep_cnt; i++) {
363 data_addr[i] = datap[i];
366 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
367 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
368 (rsm_barrier_handle_t)seg->rsmseg_barrier);
369 if (e != RSM_SUCCESS) {
370 return (e);
374 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
375 "__rsm_put16x16: exit\n"));
377 return (RSM_SUCCESS);
380 static int
381 __rsm_put32x32(rsm_memseg_import_handle_t im_memseg, off_t off,
382 uint32_t *datap,
383 ulong_t rep_cnt,
384 boolean_t swap)
386 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
387 uint32_t *data_addr =
388 /* LINTED */
389 (uint32_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
390 uint_t i = 0;
391 int e;
393 swap = swap;
395 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
396 "__rsm_put32x32: enter\n"));
398 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
399 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
400 (rsm_barrier_handle_t)seg->rsmseg_barrier);
401 if (e != RSM_SUCCESS) {
402 return (e);
406 for (i = 0; i < rep_cnt; i++) {
407 data_addr[i] = datap[i];
410 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
411 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
412 (rsm_barrier_handle_t)seg->rsmseg_barrier);
413 if (e != RSM_SUCCESS) {
414 return (e);
418 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
419 "__rsm_put32x32: exit\n"));
421 return (RSM_SUCCESS);
424 static int
425 __rsm_put64x64(rsm_memseg_import_handle_t im_memseg, off_t off,
426 uint64_t *datap,
427 ulong_t rep_cnt,
428 boolean_t swap)
430 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
431 uint64_t *data_addr =
432 /* LINTED */
433 (uint64_t *)&seg->rsmseg_vaddr[off - seg->rsmseg_mapoffset];
434 uint_t i = 0;
435 int e;
437 swap = swap;
439 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
440 "__rsm_put64x64: enter\n"));
442 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
443 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
444 (rsm_barrier_handle_t)seg->rsmseg_barrier);
445 if (e != RSM_SUCCESS) {
446 return (e);
450 for (i = 0; i < rep_cnt; i++) {
451 data_addr[i] = datap[i];
454 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
455 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
456 (rsm_barrier_handle_t)seg->rsmseg_barrier);
457 if (e != RSM_SUCCESS) {
458 return (e);
462 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
463 "__rsm_put64x64: exit\n"));
465 return (RSM_SUCCESS);
468 static int
469 __rsm_get(rsm_memseg_import_handle_t im_memseg, off_t offset, void *dst_addr,
470 size_t length)
472 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
473 int e;
475 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
476 "__rsm_get: enter\n"));
478 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
479 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
480 (rsm_barrier_handle_t)seg->rsmseg_barrier);
481 if (e != RSM_SUCCESS) {
482 return (e);
486 (void) bcopy(seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
487 dst_addr, length);
489 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
490 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
491 (rsm_barrier_handle_t)seg->rsmseg_barrier);
492 if (e != RSM_SUCCESS) {
493 return (e);
497 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
498 "__rsm_get: exit\n"));
500 return (RSM_SUCCESS);
503 static int
504 __rsm_getv(rsm_scat_gath_t *sg_io)
506 rsm_iovec_t *iovec = sg_io->iovec;
507 rsmka_iovec_t ka_iovec_arr[RSM_MAX_IOVLEN];
508 rsmka_iovec_t *ka_iovec, *ka_iovec_start;
509 rsmka_iovec_t l_iovec_arr[RSM_MAX_IOVLEN];
510 rsmka_iovec_t *l_iovec, *l_iovec_start;
511 rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
512 rsmseg_handle_t *seg_hndl;
513 int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
514 int e, i;
516 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
517 "__rsm_getv: enter\n"));
520 * Use loopback for single node operations.
521 * replace local handles with virtual addresses
524 if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
526 * To use the loopback optimization map the segment
527 * here implicitly.
529 if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
530 caddr_t va;
531 va = mmap(NULL, im_seg_hndl->rsmseg_size,
532 PROT_READ|PROT_WRITE,
533 MAP_SHARED|MAP_NORESERVE,
534 im_seg_hndl->rsmseg_fd, 0);
536 if (va == MAP_FAILED) {
537 DBPRINTF((RSM_LIBRARY, RSM_ERR,
538 "implicit map failed:%d\n", errno));
539 if (errno == EINVAL)
540 return (RSMERR_BAD_MEM_ALIGNMENT);
541 else if (errno == ENOMEM || errno == ENXIO ||
542 errno == EOVERFLOW)
543 return (RSMERR_BAD_LENGTH);
544 else if (errno == EAGAIN)
545 return (RSMERR_INSUFFICIENT_RESOURCES);
546 else
547 return (errno);
550 im_seg_hndl->rsmseg_vaddr = va;
551 im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
552 im_seg_hndl->rsmseg_mapoffset = 0;
553 im_seg_hndl->rsmseg_state = IMPORT_MAP;
554 im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
557 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
558 l_iovec_start = l_iovec = malloc(iovec_size);
559 else
560 l_iovec_start = l_iovec = l_iovec_arr;
562 bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
563 for (i = 0; i < sg_io->io_request_count; i++) {
564 if (l_iovec->io_type == RSM_HANDLE_TYPE) {
565 /* Get the surrogate export segment handle */
566 seg_hndl = (rsmseg_handle_t *)
567 l_iovec->local.handle;
568 l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
569 l_iovec->io_type = RSM_VA_TYPE;
571 l_iovec++;
573 sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
574 e = loopback_getv(sg_io);
575 sg_io->iovec = iovec;
576 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
577 free(l_iovec_start);
578 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
579 "__rsm_getv: exit\n"));
580 return (e);
583 /* for the Kernel Agent, replace local handles with segment ids */
584 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
585 ka_iovec_start = ka_iovec = malloc(iovec_size);
586 else
587 ka_iovec_start = ka_iovec = ka_iovec_arr;
589 bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
590 for (i = 0; i < sg_io->io_request_count; i++) {
591 if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
592 seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
593 ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
595 ka_iovec++;
598 sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
599 e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_GETV, sg_io);
600 sg_io->iovec = iovec;
602 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
603 free(ka_iovec_start);
605 if (e < 0) {
606 DBPRINTF((RSM_LIBRARY, RSM_ERR,
607 " RSM_IOCTL_GETV failed\n"));
608 return (errno);
611 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
612 "__rsm_getv: exit\n"));
614 return (RSM_SUCCESS);
618 static int
619 __rsm_put(rsm_memseg_import_handle_t im_memseg, off_t offset, void *src_addr,
620 size_t length)
622 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
623 int e;
625 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
626 "__rsm_put: enter\n"));
628 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
629 e = seg->rsmseg_ops->rsm_memseg_import_open_barrier(
630 (rsm_barrier_handle_t)seg->rsmseg_barrier);
631 if (e != RSM_SUCCESS) {
632 return (e);
636 bcopy(src_addr, seg->rsmseg_vaddr + offset - seg->rsmseg_mapoffset,
637 length);
639 if (seg->rsmseg_barmode == RSM_BARRIER_MODE_IMPLICIT) {
640 e = seg->rsmseg_ops->rsm_memseg_import_close_barrier(
641 (rsm_barrier_handle_t)seg->rsmseg_barrier);
642 if (e != RSM_SUCCESS) {
643 return (e);
647 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
648 "__rsm_put: exit\n"));
650 return (RSM_SUCCESS);
653 static int
654 __rsm_putv(rsm_scat_gath_t *sg_io)
656 rsm_iovec_t *iovec = sg_io->iovec;
657 rsmka_iovec_t ka_iovec_arr[RSM_MAX_IOVLEN];
658 rsmka_iovec_t *ka_iovec, *ka_iovec_start;
659 rsmka_iovec_t l_iovec_arr[RSM_MAX_IOVLEN];
660 rsmka_iovec_t *l_iovec, *l_iovec_start;
661 rsmseg_handle_t *im_seg_hndl = (rsmseg_handle_t *)sg_io->remote_handle;
662 rsmseg_handle_t *seg_hndl;
663 int iovec_size = sizeof (rsmka_iovec_t) * sg_io->io_request_count;
664 int e, i;
666 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
667 "__rsm_putv: enter\n"));
670 * Use loopback for single node operations.
671 * replace local handles with virtual addresses
674 if (im_seg_hndl->rsmseg_nodeid == rsm_local_nodeid) {
676 * To use the loopback optimization map the segment
677 * here implicitly.
679 if (im_seg_hndl->rsmseg_state == IMPORT_CONNECT) {
680 caddr_t va;
681 va = mmap(NULL, im_seg_hndl->rsmseg_size,
682 PROT_READ|PROT_WRITE,
683 MAP_SHARED|MAP_NORESERVE,
684 im_seg_hndl->rsmseg_fd, 0);
686 if (va == MAP_FAILED) {
687 DBPRINTF((RSM_LIBRARY, RSM_ERR,
688 "implicit map failed:%d\n", errno));
689 if (errno == EINVAL)
690 return (RSMERR_BAD_MEM_ALIGNMENT);
691 else if (errno == ENOMEM || errno == ENXIO ||
692 errno == EOVERFLOW)
693 return (RSMERR_BAD_LENGTH);
694 else if (errno == EAGAIN)
695 return (RSMERR_INSUFFICIENT_RESOURCES);
696 else
697 return (errno);
699 im_seg_hndl->rsmseg_vaddr = va;
700 im_seg_hndl->rsmseg_maplen = im_seg_hndl->rsmseg_size;
701 im_seg_hndl->rsmseg_mapoffset = 0;
702 im_seg_hndl->rsmseg_state = IMPORT_MAP;
703 im_seg_hndl->rsmseg_flags |= RSM_IMPLICIT_MAP;
706 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
707 l_iovec_start = l_iovec = malloc(iovec_size);
708 else
709 l_iovec_start = l_iovec = l_iovec_arr;
711 bcopy((caddr_t)iovec, (caddr_t)l_iovec, iovec_size);
712 for (i = 0; i < sg_io->io_request_count; i++) {
713 if (l_iovec->io_type == RSM_HANDLE_TYPE) {
714 /* Get the surrogate export segment handle */
715 seg_hndl = (rsmseg_handle_t *)
716 l_iovec->local.handle;
717 l_iovec->local.vaddr = seg_hndl->rsmseg_vaddr;
718 l_iovec->io_type = RSM_VA_TYPE;
720 l_iovec++;
722 sg_io->iovec = (rsm_iovec_t *)l_iovec_start;
723 e = loopback_putv(sg_io);
724 sg_io->iovec = iovec;
726 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
727 free(l_iovec_start);
729 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
730 "__rsm_putv: exit\n"));
733 return (e);
736 /* for the Kernel Agent, replace local handles with segment ids */
737 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
738 ka_iovec_start = ka_iovec = malloc(iovec_size);
739 else
740 ka_iovec_start = ka_iovec = ka_iovec_arr;
742 bcopy((caddr_t)iovec, (caddr_t)ka_iovec, iovec_size);
744 for (i = 0; i < sg_io->io_request_count; i++) {
745 if (ka_iovec->io_type == RSM_HANDLE_TYPE) {
746 seg_hndl = (rsmseg_handle_t *)ka_iovec->local.handle;
747 ka_iovec->local.segid = seg_hndl->rsmseg_keyid;
749 ka_iovec++;
752 sg_io->iovec = (rsm_iovec_t *)ka_iovec_start;
753 e = ioctl(im_seg_hndl->rsmseg_fd, RSM_IOCTL_PUTV, sg_io);
754 sg_io->iovec = iovec;
756 if (sg_io->io_request_count > RSM_MAX_IOVLEN)
757 free(ka_iovec_start);
759 if (e < 0) {
760 DBPRINTF((RSM_LIBRARY, RSM_ERR,
761 " RSM_IOCTL_PUTV failed\n"));
762 return (errno);
765 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
766 "__rsm_putv: exit\n"));
768 return (RSM_SUCCESS);
/*
 * import side memory segment operations (barriers):
 */
774 static int
775 __rsm_memseg_import_init_barrier(rsm_memseg_import_handle_t im_memseg,
776 rsm_barrier_type_t type,
777 rsm_barrier_handle_t barrier)
779 rsmseg_handle_t *seg = (rsmseg_handle_t *)im_memseg;
780 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
782 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
784 "__rsm_memseg_import_init_barrier: enter\n"));
786 type = type;
788 if (!seg) {
789 DBPRINTF((RSM_LIBRARY, RSM_ERR,
790 "invalid segment handle\n"));
791 return (RSMERR_BAD_SEG_HNDL);
793 if (!bar) {
794 DBPRINTF((RSM_LIBRARY, RSM_ERR,
795 "invalid barrier handle\n"));
796 return (RSMERR_BAD_BARRIER_PTR);
799 /* XXX: fix later. We only support span-of-node barriers */
801 bar->rsmgenbar_data = (rsm_barrier_t *)malloc(sizeof (rsm_barrier_t));
802 if (bar->rsmgenbar_data == NULL) {
803 DBPRINTF((RSM_LIBRARY, RSM_ERR,
804 "not enough memory\n"));
805 return (RSMERR_INSUFFICIENT_MEM);
807 bar->rsmgenbar_seg = seg;
809 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
810 "__rsm_memseg_import_init_barrier: exit\n"));
812 return (RSM_SUCCESS);
815 static int
816 __rsm_memseg_import_open_barrier(rsm_barrier_handle_t barrier)
818 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
819 rsmseg_handle_t *seg;
820 rsm_ioctlmsg_t msg;
822 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
823 "__rsm_memseg_import_open_barrier: enter\n"));
825 if (!bar) {
826 DBPRINTF((RSM_LIBRARY, RSM_ERR,
827 "invalid barrier pointer\n"));
828 return (RSMERR_BAD_BARRIER_PTR);
831 if ((seg = bar->rsmgenbar_seg) == 0) {
832 DBPRINTF((RSM_LIBRARY, RSM_ERR,
833 "uninitialized barrier\n"));
834 return (RSMERR_BARRIER_UNINITIALIZED);
837 /* lint -save -e718 -e746 */
838 msg.bar = *(bar->rsmgenbar_data);
839 if (ioctl(seg->rsmseg_fd,
840 RSM_IOCTL_BAR_OPEN, &msg) < 0) {
841 DBPRINTF((RSM_LIBRARY, RSM_ERR,
842 " RSM_IOCTL_BAR_OPEN failed\n"));
843 /* lint -restore */
844 return (RSMERR_BARRIER_OPEN_FAILED);
847 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
848 "__rsm_memseg_import_open_barrier: exit\n"));
850 return (RSM_SUCCESS);
853 static int
854 __rsm_memseg_import_order_barrier(rsm_barrier_handle_t barrier)
856 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
857 rsmseg_handle_t *seg;
858 rsm_ioctlmsg_t msg;
860 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
861 "__rsm_memseg_import_order_barrier: enter\n"));
863 if (!bar) {
864 DBPRINTF((RSM_LIBRARY, RSM_ERR,
865 "invalid barrier\n"));
866 return (RSMERR_BAD_BARRIER_PTR);
868 if ((seg = bar->rsmgenbar_seg) == 0) {
869 DBPRINTF((RSM_LIBRARY, RSM_ERR,
870 "uninitialized barrier\n"));
871 return (RSMERR_BARRIER_UNINITIALIZED);
874 msg.bar = *(bar->rsmgenbar_data);
875 if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_ORDER, &msg) < 0) {
876 DBPRINTF((RSM_LIBRARY, RSM_ERR,
877 "RSM_IOCTL_BAR_ORDER failed\n"));
878 return (RSMERR_BARRIER_FAILURE);
881 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
882 "__rsm_memseg_import_order_barrier: exit\n"));
884 return (RSM_SUCCESS);
887 static int
888 __rsm_memseg_import_close_barrier(rsm_barrier_handle_t barrier)
890 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
891 rsmseg_handle_t *seg;
892 rsm_ioctlmsg_t msg;
894 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
895 "__rsm_memseg_import_close_barrier: enter\n"));
897 if (!bar) {
898 DBPRINTF((RSM_LIBRARY, RSM_ERR,
899 "invalid barrier\n"));
900 return (RSMERR_BAD_BARRIER_PTR);
902 if ((seg = bar->rsmgenbar_seg) == 0) {
903 DBPRINTF((RSM_LIBRARY, RSM_ERR,
904 "uninitialized barrier\n"));
905 return (RSMERR_BARRIER_UNINITIALIZED);
908 msg.bar = *(bar->rsmgenbar_data);
909 if (ioctl(seg->rsmseg_fd, RSM_IOCTL_BAR_CLOSE, &msg) < 0) {
910 DBPRINTF((RSM_LIBRARY, RSM_ERR,
911 " RSM_IOCTL_BAR_CLOSE failed\n"));
912 return (RSMERR_BARRIER_FAILURE);
915 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
916 "__rsm_memseg_import_close_barrier: exit\n"));
918 return (RSM_SUCCESS);
921 static int
922 __rsm_memseg_import_destroy_barrier(rsm_barrier_handle_t barrier)
924 rsmgenbar_handle_t *bar = (rsmgenbar_handle_t *)barrier;
926 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
927 "__rsm_memseg_import_destroy_barrier: enter\n"));
929 if (!bar) {
930 DBPRINTF((RSM_LIBRARY, RSM_ERR,
931 "invalid barrier\n"));
932 return (RSMERR_BAD_BARRIER_PTR);
935 free((void *) bar->rsmgenbar_data);
937 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
938 "__rsm_memseg_import_destroy_barrier: exit\n"));
940 return (RSM_SUCCESS);
943 /* lint -w1 */
944 static int
945 __rsm_memseg_import_get_mode(rsm_memseg_import_handle_t im_memseg,
946 rsm_barrier_mode_t *mode)
948 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
949 "__rsm_memseg_import_get_mode: enter\n"));
951 im_memseg = im_memseg; mode = mode;
953 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
954 "__rsm_memseg_import_get_mode: exit\n"));
956 return (RSM_SUCCESS);
958 static int
959 __rsm_memseg_import_set_mode(rsm_memseg_import_handle_t im_memseg,
960 rsm_barrier_mode_t mode)
962 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
963 "__rsm_memseg_import_set_mode: enter\n"));
965 im_memseg = im_memseg; mode = mode;
967 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
968 "__rsm_memseg_import_set_mode: exit\n"));
970 return (RSM_SUCCESS);
973 static int
974 __rsm_create_memory_handle(rsmapi_controller_handle_t controller,
975 rsm_localmemory_handle_t *local_hndl_p,
976 caddr_t local_va, size_t len)
978 rsm_memseg_export_handle_t memseg;
979 rsmapi_access_entry_t acl[1];
980 rsm_memseg_id_t segid = 0;
981 size_t size;
982 int e;
985 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
986 "__rsm_create_memory_handle: enter\n"));
989 * create a surrogate segment (local memory will be locked down).
991 size = roundup(len, PAGESIZE);
992 e = rsm_memseg_export_create(controller, &memseg,
993 (void *)local_va, size,
994 RSM_ALLOW_REBIND);
995 if (e != RSM_SUCCESS) {
996 DBPRINTF((RSM_LIBRARY, RSM_ERR,
997 "export create failed\n"));
998 return (e);
1002 * Publish the segment to the local node only. If the segment
1003 * length is very large then don't publish to the adapter driver
1004 * because that will consume too much DVMA space - this is indicated
1005 * to the Kernel Agent using null permissions. DVMA binding will
1006 * be done when the RDMA is set up.
1008 acl[0].ae_node = rsm_local_nodeid;
1009 if (len > RSM_MAX_HANDLE_DVMA)
1010 acl[0].ae_permission = 0;
1011 else
1012 acl[0].ae_permission = RSM_PERM_RDWR;
1014 e = rsm_memseg_export_publish(memseg, &segid, acl, 1);
1015 if (e != RSM_SUCCESS) {
1016 DBPRINTF((RSM_LIBRARY, RSM_ERR,
1017 "export publish failed\n"));
1018 rsm_memseg_export_destroy(memseg);
1019 return (e);
1022 /* Use the surrogate seghandle as the local memory handle */
1023 *local_hndl_p = (rsm_localmemory_handle_t)memseg;
1025 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1026 "__rsm_create_memory_handle: exit\n"));
1028 return (e);
1031 static int
1032 __rsm_free_memory_handle(rsm_localmemory_handle_t local_handle)
1034 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1035 "__rsm_free_memory_handle: enter\n"));
1037 rsm_memseg_export_destroy((rsm_memseg_export_handle_t)local_handle);
1039 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1040 "__rsm_free_memory_handle: exit\n"));
1042 return (RSM_SUCCESS);
1045 static int
1046 __rsm_get_lib_attr(rsm_ndlib_attr_t **libattrp)
1049 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1050 "__rsm_get_lib_attr: enter\n"));
1052 *libattrp = &_rsm_genlib_attr;
1054 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1055 "__rsm_get_lib_attr: exit\n"));
1057 return (RSM_SUCCESS);
1060 static int
1061 __rsm_closedevice(rsmapi_controller_handle_t cntr_handle)
1064 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1065 "__rsm_closedevice: enter\n"));
1067 cntr_handle = cntr_handle;
1069 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1070 "__rsm_closedevice: exit\n"));
1072 return (RSM_SUCCESS);
1075 void
1076 __rsmdefault_setops(rsm_segops_t *segops)
1079 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1080 "__rsmdefault_setops: enter\n"));
1082 if (segops->rsm_memseg_import_connect == NULL) {
1083 segops->rsm_memseg_import_connect = __rsm_import_connect;
1085 if (segops->rsm_memseg_import_disconnect == NULL) {
1086 segops->rsm_memseg_import_disconnect = __rsm_import_disconnect;
1089 if (segops->rsm_memseg_import_get8 == NULL) {
1090 segops->rsm_memseg_import_get8 = __rsm_get8x8;
1092 if (segops->rsm_memseg_import_get16 == NULL) {
1093 segops->rsm_memseg_import_get16 = __rsm_get16x16;
1095 if (segops->rsm_memseg_import_get32 == NULL) {
1096 segops->rsm_memseg_import_get32 = __rsm_get32x32;
1098 if (segops->rsm_memseg_import_get64 == NULL) {
1099 segops->rsm_memseg_import_get64 = __rsm_get64x64;
1101 if (segops->rsm_memseg_import_get == NULL) {
1102 segops->rsm_memseg_import_get = __rsm_get;
1105 if (segops->rsm_memseg_import_put8 == NULL) {
1106 segops->rsm_memseg_import_put8 = __rsm_put8x8;
1108 if (segops->rsm_memseg_import_put16 == NULL) {
1109 segops->rsm_memseg_import_put16 = __rsm_put16x16;
1111 if (segops->rsm_memseg_import_put32 == NULL) {
1112 segops->rsm_memseg_import_put32 = __rsm_put32x32;
1114 if (segops->rsm_memseg_import_put64 == NULL) {
1115 segops->rsm_memseg_import_put64 = __rsm_put64x64;
1117 if (segops->rsm_memseg_import_put == NULL) {
1118 segops->rsm_memseg_import_put = __rsm_put;
1121 if (segops->rsm_memseg_import_putv == NULL) {
1122 segops->rsm_memseg_import_putv = __rsm_putv;
1125 if (segops->rsm_memseg_import_getv == NULL) {
1126 segops->rsm_memseg_import_getv = __rsm_getv;
1129 if (segops->rsm_create_localmemory_handle == NULL) {
1130 segops->rsm_create_localmemory_handle =
1131 __rsm_create_memory_handle;
1134 if (segops->rsm_free_localmemory_handle == NULL) {
1135 segops->rsm_free_localmemory_handle =
1136 __rsm_free_memory_handle;
1139 /* XXX: Need to support barrier functions */
1140 if (segops->rsm_memseg_import_init_barrier == NULL) {
1141 segops->rsm_memseg_import_init_barrier =
1142 __rsm_memseg_import_init_barrier;
1144 if (segops->rsm_memseg_import_open_barrier == NULL) {
1145 segops->rsm_memseg_import_open_barrier =
1146 __rsm_memseg_import_open_barrier;
1148 if (segops->rsm_memseg_import_order_barrier == NULL) {
1149 segops->rsm_memseg_import_order_barrier =
1150 __rsm_memseg_import_order_barrier;
1152 if (segops->rsm_memseg_import_close_barrier == NULL) {
1153 segops->rsm_memseg_import_close_barrier =
1154 __rsm_memseg_import_close_barrier;
1156 if (segops->rsm_memseg_import_destroy_barrier == NULL) {
1157 segops->rsm_memseg_import_destroy_barrier =
1158 __rsm_memseg_import_destroy_barrier;
1161 if (segops->rsm_memseg_import_get_mode == NULL) {
1162 segops->rsm_memseg_import_get_mode =
1163 __rsm_memseg_import_get_mode;
1165 if (segops->rsm_memseg_import_set_mode == NULL) {
1166 segops->rsm_memseg_import_set_mode =
1167 __rsm_memseg_import_set_mode;
1170 if (segops->rsm_get_lib_attr == NULL) {
1171 segops->rsm_get_lib_attr =
1172 __rsm_get_lib_attr;
1175 if (segops->rsm_closedevice == NULL) {
1176 segops->rsm_closedevice =
1177 __rsm_closedevice;
1181 DBPRINTF((RSM_LIBRARY, RSM_DEBUG_VERBOSE,
1182 "__rsmdefault_setops: exit\n"));