/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Joyent, Inc.
 */
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <vm/seg_kmem.h>
#include <sys/vmsystm.h>
#include <sys/sysmacros.h>
#include <sys/ddidevmap.h>
#include <sys/xsvc.h>
/* total max memory which can be allocated with the ioctl interface */
uint64_t xsvc_max_memory = 10 * 1024 * 1024;
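/*
 * Illustrative note (not in the original source): because xsvc_max_memory
 * is a plain global tunable, it could in principle also be overridden at
 * boot time via /etc/system, e.g.:
 *
 *	set xsvc:xsvc_max_memory=0x2000000
 *
 * for a 32MB cap. The supported knob, however, is the "maxallocmem"
 * property read in xsvc_attach() below.
 */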
extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
static int xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred);
static int xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred);
static int xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
    int *rval);
static int xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off,
    size_t len, size_t *maplen, uint_t model);
static int xsvc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int xsvc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static struct cb_ops xsvc_cb_ops = {
	xsvc_open,		/* cb_open */
	xsvc_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	xsvc_ioctl,		/* cb_ioctl */
	xsvc_devmap,		/* cb_devmap */
	NULL,			/* cb_mmap */
	NULL,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_NEW | D_MP | D_64BIT | D_DEVMAP,	/* cb_flag */
	CB_REV
};
static struct dev_ops xsvc_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	xsvc_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	xsvc_attach,		/* devo_attach */
	xsvc_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&xsvc_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* devo_power */
	ddi_quiesce_not_needed,	/* quiesce */
};
static struct modldrv xsvc_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"xsvc driver",		/* Name of the module. */
	&xsvc_dev_ops,		/* driver ops */
};
static struct modlinkage xsvc_modlinkage = {
	MODREV_1,
	(void *) &xsvc_modldrv,
	NULL
};
static int xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode);
static int xsvc_mem_alloc(xsvc_state_t *state, uint64_t key,
    xsvc_mem_t **mp);
static void xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp);
static xsvc_mem_t *xsvc_mem_lookup(xsvc_state_t *state,
    uint64_t key);
static int xsvc_mnode_key_compare(const void *q, const void *e);
static int xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
    ddi_umem_cookie_t *cookiep);
static void xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep);
/* Driver State Pointer */
void *xsvc_statep;

static ddi_device_acc_attr_t xsvc_device_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
static int xsvc_devmap_map(devmap_cookie_t dhp, dev_t dev, uint_t flags,
    offset_t off, size_t len, void **pvtp);
static int xsvc_devmap_dup(devmap_cookie_t dhp, void *pvtp,
    devmap_cookie_t new_dhp, void **new_pvtp);
static void xsvc_devmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, devmap_cookie_t new_dhp1, void **new_pvtp1,
    devmap_cookie_t new_dhp2, void **new_pvtp2);
static struct devmap_callback_ctl xsvc_callbk = {
	DEVMAP_OPS_REV,
	xsvc_devmap_map,
	NULL,
	xsvc_devmap_dup,
	xsvc_devmap_unmap
};
int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&xsvc_statep, sizeof (xsvc_state_t), 1);
	if (err != 0) {
		return (err);
	}

	err = mod_install(&xsvc_modlinkage);
	if (err != 0) {
		ddi_soft_state_fini(&xsvc_statep);
	}

	return (err);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&xsvc_modlinkage, modinfop));
}
int
_fini(void)
{
	int err;

	err = mod_remove(&xsvc_modlinkage);
	if (err != 0) {
		return (err);
	}

	ddi_soft_state_fini(&xsvc_statep);

	return (err);
}
static int
xsvc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	xsvc_state_t *state;
	int maxallocmem;
	int instance;
	int err;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(xsvc_statep, instance);
	if (err != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		goto attachfail_get_soft_state;
	}

	state->xs_dip = dip;
	state->xs_instance = instance;

	/* Initialize allocation count */
	mutex_init(&state->xs_mutex, NULL, MUTEX_DRIVER, NULL);
	state->xs_currently_alloced = 0;

	mutex_init(&state->xs_cookie_mutex, NULL, MUTEX_DRIVER, NULL);

	/* create the minor node (for the ioctl) */
	err = ddi_create_minor_node(dip, "xsvc", S_IFCHR, instance, DDI_PSEUDO,
	    0);
	if (err != DDI_SUCCESS) {
		goto attachfail_minor_node;
	}

	/*
	 * the maxallocmem property will override the default
	 * (xsvc_max_memory). This is the maximum total memory the ioctl will
	 * allow to be allocated.
	 */
	maxallocmem = ddi_prop_get_int(DDI_DEV_T_ANY, state->xs_dip,
	    DDI_PROP_DONTPASS, "maxallocmem", -1);
	if (maxallocmem >= 0) {
		xsvc_max_memory = maxallocmem * 1024;
	}

	/* Initialize list of memory allocs */
	mutex_init(&state->xs_mlist.ml_mutex, NULL, MUTEX_DRIVER, NULL);
	avl_create(&state->xs_mlist.ml_avl, xsvc_mnode_key_compare,
	    sizeof (xsvc_mnode_t), offsetof(xsvc_mnode_t, mn_link));

	/* Report that driver was loaded */
	ddi_report_dev(dip);

	return (DDI_SUCCESS);

attachfail_minor_node:
	mutex_destroy(&state->xs_cookie_mutex);
	mutex_destroy(&state->xs_mutex);
attachfail_get_soft_state:
	(void) ddi_soft_state_free(xsvc_statep, instance);

	return (DDI_FAILURE);
}
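/*
 * Illustrative sketch (not in the original source): the "maxallocmem"
 * property read in xsvc_attach() is expressed in KB and would normally be
 * supplied through the driver's .conf file, e.g. a line like the following
 * in xsvc.conf:
 *
 *	maxallocmem=32768;
 *
 * which yields xsvc_max_memory = 32768 * 1024 bytes (32MB).
 */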
static int
xsvc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xsvc_state_t *state;
	xsvc_mnode_t *mnode;
	xsvc_mem_t *mp;
	int instance;

	instance = ddi_get_instance(dip);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ddi_remove_minor_node(dip, NULL);

	/* Free any memory on list */
	while ((mnode = avl_first(&state->xs_mlist.ml_avl)) != NULL) {
		mp = mnode->mn_home;
		xsvc_mem_free(state, mp);
	}

	/* remove list */
	avl_destroy(&state->xs_mlist.ml_avl);
	mutex_destroy(&state->xs_mlist.ml_mutex);

	mutex_destroy(&state->xs_cookie_mutex);
	mutex_destroy(&state->xs_mutex);
	(void) ddi_soft_state_free(xsvc_statep, state->xs_instance);
	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	xsvc_state_t *state;
	int instance;
	dev_t dev;
	int err;

	dev = (dev_t)arg;
	instance = getminor(dev);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		state = ddi_get_soft_state(xsvc_statep, instance);
		if (state == NULL) {
			return (DDI_FAILURE);
		}
		*result = (void *)state->xs_dip;
		err = DDI_SUCCESS;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		err = DDI_SUCCESS;
		break;

	default:
		err = DDI_FAILURE;
		break;
	}

	return (err);
}
/*ARGSUSED*/
static int
xsvc_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	xsvc_state_t *state;
	int instance;

	instance = getminor(*devp);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	return (0);
}
/*ARGSUSED*/
static int
xsvc_close(dev_t devp, int flag, int otyp, cred_t *cred)
{
	return (0);
}
static int
xsvc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred, int *rval)
{
	xsvc_state_t *state;
	int instance;
	int err;

	err = drv_priv(cred);
	if (err != 0) {
		return (EPERM);
	}
	instance = getminor(dev);
	if (instance == -1) {
		return (EBADF);
	}
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* cmd values (XSVC_*) are assumed to come from <sys/xsvc.h> */
	switch (cmd) {
	case XSVC_ALLOC_MEM:
		err = xsvc_ioctl_alloc_memory(state, (void *)arg, mode);
		break;
	case XSVC_FREE_MEM:
		err = xsvc_ioctl_free_memory(state, (void *)arg, mode);
		break;
	case XSVC_FLUSH_MEM:
		err = xsvc_ioctl_flush_memory(state, (void *)arg, mode);
		break;
	default:
		err = ENXIO;
	}

	return (err);
}
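/*
 * Illustrative sketch (not in the original source): how a privileged
 * user-space consumer might drive the allocation ioctl above. The
 * XSVC_ALLOC_MEM command name and the /dev/xsvc path are assumptions here;
 * the request layout (xsvc_mem_req, xsvc_mloc) matches the fields this
 * file copies in and out and is expected to come from <sys/xsvc.h>.
 *
 *	#include <sys/types.h>
 *	#include <sys/xsvc.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	xsvc_mloc sgl[8];
 *	xsvc_mem_req req;
 *	int fd = open("/dev/xsvc", O_RDWR);
 *
 *	req.xsvc_mem_reqid = 42;
 *	req.xsvc_mem_size = 8192;
 *	req.xsvc_mem_align = 4096;
 *	req.xsvc_mem_addr_lo = 0;
 *	req.xsvc_mem_addr_hi = 0xFFFFFFFF;
 *	req.xsvc_mem_sgllen = 7;
 *	req.xsvc_sg_list = (uint64_t)(uintptr_t)sgl;
 *	if (ioctl(fd, XSVC_ALLOC_MEM, &req) != 0)
 *		perror("XSVC_ALLOC_MEM");
 */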
/*
 * xsvc_ioctl_alloc_memory()
 *
 */
static int
xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mloc_32 *usgl32;
	xsvc_mem_req params;
	xsvc_mloc_32 sgl32;
	xsvc_mloc *usgl;
	xsvc_mem_t *mp;
	xsvc_mloc sgl;
	uint64_t key;
	size_t size;
	int err;
	int i;

	/* Copy in the params, then get the size and key */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params32.xsvc_mem_reqid;
		size = P2ROUNDUP((size_t)params32.xsvc_mem_size, PAGESIZE);
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
		size = P2ROUNDUP(params.xsvc_mem_size, PAGESIZE);
	}

	/*
	 * make sure this doesn't put us over the maximum allowed to be
	 * allocated
	 */
	mutex_enter(&state->xs_mutex);
	if ((state->xs_currently_alloced + size) > xsvc_max_memory) {
		mutex_exit(&state->xs_mutex);
		return (EAGAIN);
	}
	state->xs_currently_alloced += size;
	mutex_exit(&state->xs_mutex);

	/* get state to track this memory */
	err = xsvc_mem_alloc(state, key, &mp);
	if (err != 0) {
		return (err);
	}
	mp->xm_size = size;

	/* allocate and bind the memory */
	mp->xm_dma_attr.dma_attr_version = DMA_ATTR_V0;
	mp->xm_dma_attr.dma_attr_count_max = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_burstsizes = 1;
	mp->xm_dma_attr.dma_attr_minxfer = 1;
	mp->xm_dma_attr.dma_attr_maxxfer = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_seg = (uint64_t)0xFFFFFFFF;
	mp->xm_dma_attr.dma_attr_granular = 1;
	mp->xm_dma_attr.dma_attr_flags = 0;

	/* Finish converting params */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		mp->xm_dma_attr.dma_attr_addr_lo = params32.xsvc_mem_addr_lo;
		mp->xm_dma_attr.dma_attr_addr_hi = params32.xsvc_mem_addr_hi;
		mp->xm_dma_attr.dma_attr_sgllen = params32.xsvc_mem_sgllen;
		usgl32 = (xsvc_mloc_32 *)(uintptr_t)params32.xsvc_sg_list;
		mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
		    params32.xsvc_mem_align, PAGESIZE);
	} else {
		mp->xm_dma_attr.dma_attr_addr_lo = params.xsvc_mem_addr_lo;
		mp->xm_dma_attr.dma_attr_addr_hi = params.xsvc_mem_addr_hi;
		mp->xm_dma_attr.dma_attr_sgllen = params.xsvc_mem_sgllen;
		usgl = (xsvc_mloc *)(uintptr_t)params.xsvc_sg_list;
		mp->xm_dma_attr.dma_attr_align = P2ROUNDUP(
		    params.xsvc_mem_align, PAGESIZE);
	}

	mp->xm_device_attr = xsvc_device_attr;

	err = ddi_dma_alloc_handle(state->xs_dip, &mp->xm_dma_attr,
	    DDI_DMA_SLEEP, NULL, &mp->xm_dma_handle);
	if (err != DDI_SUCCESS) {
		err = EINVAL;
		goto allocfail_alloc_handle;
	}

	/* don't sleep here so we don't get stuck in contig alloc */
	err = ddi_dma_mem_alloc(mp->xm_dma_handle, mp->xm_size,
	    &mp->xm_device_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &mp->xm_addr, &mp->xm_real_length, &mp->xm_mem_handle);
	if (err != DDI_SUCCESS) {
		err = EINVAL;
		goto allocfail_alloc_mem;
	}

	err = ddi_dma_addr_bind_handle(mp->xm_dma_handle, NULL, mp->xm_addr,
	    mp->xm_size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &mp->xm_cookie, &mp->xm_cookie_count);
	if (err != DDI_DMA_MAPPED) {
		err = EFAULT;
		goto allocfail_bind;
	}

	/* return sgl */
	for (i = 0; i < mp->xm_cookie_count; i++) {
		if (ddi_model_convert_from(mode & FMODELS) ==
		    DDI_MODEL_ILP32) {
			sgl32.mloc_addr = mp->xm_cookie.dmac_laddress;
			sgl32.mloc_size = mp->xm_cookie.dmac_size;
			err = ddi_copyout(&sgl32, &usgl32[i],
			    sizeof (xsvc_mloc_32), mode);
			if (err != 0) {
				err = EFAULT;
				goto allocfail_copyout;
			}
		} else {
			sgl.mloc_addr = mp->xm_cookie.dmac_laddress;
			sgl.mloc_size = mp->xm_cookie.dmac_size;
			err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc),
			    mode);
			if (err != 0) {
				err = EFAULT;
				goto allocfail_copyout;
			}
		}

		ddi_dma_nextcookie(mp->xm_dma_handle, &mp->xm_cookie);
	}

	/* set the last sgl entry to 0 to indicate cookie count */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sgl32.mloc_addr = 0;
		sgl32.mloc_size = 0;
		err = ddi_copyout(&sgl32, &usgl32[i], sizeof (xsvc_mloc_32),
		    mode);
		if (err != 0) {
			err = EFAULT;
			goto allocfail_copyout;
		}
	} else {
		sgl.mloc_addr = 0;
		sgl.mloc_size = 0;
		err = ddi_copyout(&sgl, &usgl[i], sizeof (xsvc_mloc), mode);
		if (err != 0) {
			err = EFAULT;
			goto allocfail_copyout;
		}
	}

	return (0);

allocfail_copyout:
	(void) ddi_dma_unbind_handle(mp->xm_dma_handle);
allocfail_bind:
	ddi_dma_mem_free(&mp->xm_mem_handle);
allocfail_alloc_mem:
	ddi_dma_free_handle(&mp->xm_dma_handle);
allocfail_alloc_handle:
	mp->xm_dma_handle = NULL;
	xsvc_mem_free(state, mp);

	mutex_enter(&state->xs_mutex);
	state->xs_currently_alloced = state->xs_currently_alloced - size;
	mutex_exit(&state->xs_mutex);

	return (err);
}
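/*
 * Illustrative sketch (not in the original source): because the code above
 * terminates the returned scatter/gather list with a zeroed entry, a
 * consumer can walk the list without knowing the cookie count up front:
 *
 *	for (i = 0; sgl[i].mloc_size != 0; i++)
 *		program_dma_engine(sgl[i].mloc_addr, sgl[i].mloc_size);
 *
 * program_dma_engine() is a hypothetical consumer routine.
 */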
/*
 * xsvc_ioctl_flush_memory()
 *
 */
static int
xsvc_ioctl_flush_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mem_req params;
	xsvc_mem_t *mp;
	uint64_t key;
	int err;

	/* Copy in the params, then get the key */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params32.xsvc_mem_reqid;
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
	}

	/* find the memory */
	mp = xsvc_mem_lookup(state, key);
	if (mp == NULL) {
		return (EINVAL);
	}

	(void) ddi_dma_sync(mp->xm_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	return (0);
}
/*
 * xsvc_ioctl_free_memory()
 *
 */
static int
xsvc_ioctl_free_memory(xsvc_state_t *state, void *arg, int mode)
{
	xsvc_mem_req_32 params32;
	xsvc_mem_req params;
	xsvc_mem_t *mp;
	uint64_t key;
	int err;

	/* Copy in the params, then get the key */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		err = ddi_copyin(arg, &params32, sizeof (xsvc_mem_req_32),
		    mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params32.xsvc_mem_reqid;
	} else {
		err = ddi_copyin(arg, &params, sizeof (xsvc_mem_req), mode);
		if (err != 0) {
			return (EFAULT);
		}
		key = (uint64_t)params.xsvc_mem_reqid;
	}

	/* find the memory */
	mp = xsvc_mem_lookup(state, key);
	if (mp == NULL) {
		return (EINVAL);
	}

	xsvc_mem_free(state, mp);

	return (0);
}
/*
 * xsvc_mem_alloc()
 *
 */
static int
xsvc_mem_alloc(xsvc_state_t *state, uint64_t key, xsvc_mem_t **mp)
{
	xsvc_mem_t *mem;

	/* if the key is already in use, free the old allocation first */
	mem = xsvc_mem_lookup(state, key);
	if (mem != NULL) {
		xsvc_mem_free(state, mem);
	}

	*mp = kmem_alloc(sizeof (xsvc_mem_t), KM_SLEEP);
	(*mp)->xm_mnode.mn_home = *mp;
	(*mp)->xm_mnode.mn_key = key;

	mutex_enter(&state->xs_mlist.ml_mutex);
	avl_add(&state->xs_mlist.ml_avl, &(*mp)->xm_mnode);
	mutex_exit(&state->xs_mlist.ml_mutex);

	return (0);
}
/*
 * xsvc_mem_free()
 *
 */
static void
xsvc_mem_free(xsvc_state_t *state, xsvc_mem_t *mp)
{
	if (mp->xm_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(mp->xm_dma_handle);
		ddi_dma_mem_free(&mp->xm_mem_handle);
		ddi_dma_free_handle(&mp->xm_dma_handle);

		mutex_enter(&state->xs_mutex);
		state->xs_currently_alloced = state->xs_currently_alloced -
		    mp->xm_size;
		mutex_exit(&state->xs_mutex);
	}

	mutex_enter(&state->xs_mlist.ml_mutex);
	avl_remove(&state->xs_mlist.ml_avl, &mp->xm_mnode);
	mutex_exit(&state->xs_mlist.ml_mutex);

	kmem_free(mp, sizeof (*mp));
}
/*
 * xsvc_mem_lookup()
 *
 */
static xsvc_mem_t *
xsvc_mem_lookup(xsvc_state_t *state, uint64_t key)
{
	xsvc_mnode_t mnode;
	xsvc_mnode_t *mnp;
	avl_index_t where;
	xsvc_mem_t *mp;

	mnode.mn_key = key;
	mutex_enter(&state->xs_mlist.ml_mutex);
	mnp = avl_find(&state->xs_mlist.ml_avl, &mnode, &where);
	mutex_exit(&state->xs_mlist.ml_mutex);

	if (mnp != NULL) {
		mp = mnp->mn_home;
	} else {
		mp = NULL;
	}

	return (mp);
}
/*
 * xsvc_mnode_key_compare()
 *
 */
static int
xsvc_mnode_key_compare(const void *q, const void *e)
{
	xsvc_mnode_t *n1;
	xsvc_mnode_t *n2;

	n1 = (xsvc_mnode_t *)q;
	n2 = (xsvc_mnode_t *)e;

	if (n1->mn_key < n2->mn_key) {
		return (-1);
	} else if (n1->mn_key > n2->mn_key) {
		return (1);
	} else {
		return (0);
	}
}
static int
xsvc_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	ddi_umem_cookie_t cookie;
	xsvc_state_t *state;
	offset_t off_align;
	size_t npages;
	caddr_t kvai;
	size_t psize;
	int instance;
	caddr_t kva;
	pfn_t pfn;
	int err;
	int i;

	instance = getminor(dev);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	/*
	 * On 64-bit kernels, if we have a 32-bit application doing a mmap(),
	 * smmap32 will sign extend the offset. We need to undo that since
	 * we are passed a physical address in off, not an offset.
	 */
	if (((model & DDI_MODEL_MASK) == DDI_MODEL_ILP32) &&
	    ((off & ~0xFFFFFFFFll) == ~0xFFFFFFFFll)) {
		off = off & 0xFFFFFFFF;
	}

	pfn = btop(off);

	/* always work with whole pages */
	off_align = P2ALIGN(off, PAGESIZE);
	psize = P2ROUNDUP(off + len, PAGESIZE) - off_align;

	/*
	 * if this is memory we're trying to map into user space, we first
	 * need to map the PFNs into KVA, then build up a umem cookie, and
	 * finally do a umem_setup to map it in.
	 */
	if (pf_is_memory(pfn)) {
		npages = btop(psize);

		kva = vmem_alloc(heap_arena, psize, VM_SLEEP);
		if (kva == NULL) {
			return (-1);
		}

		kvai = kva;
		for (i = 0; i < npages; i++) {
			page_t *pp = page_numtopp_nolock(pfn);

			/*
			 * Preemptively check for panic conditions from
			 * hat_devload and error out instead.
			 */
			if (pp != NULL && (PP_ISFREE(pp) ||
			    (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)))) {
				err = -1;
				goto devmapfail_cookie_alloc;
			}

			hat_devload(kas.a_hat, kvai, PAGESIZE, pfn,
			    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
			pfn++;
			kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
		}

		err = xsvc_umem_cookie_alloc(kva, psize, KM_SLEEP, &cookie);
		if (err != 0) {
			goto devmapfail_cookie_alloc;
		}

		if ((err = devmap_umem_setup(dhp, state->xs_dip, &xsvc_callbk,
		    cookie, 0, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
			goto devmapfail_umem_setup;
		}
		*maplen = psize;

	/*
	 * If this is not memory (or a foreign MFN in i86xpv), go through
	 * devmem_setup.
	 */
	} else {
		if ((err = devmap_devmem_setup(dhp, state->xs_dip, NULL, 0,
		    off_align, psize, PROT_ALL, 0, &xsvc_device_attr)) < 0) {
			return (err);
		}
		*maplen = psize;
	}

	return (0);

devmapfail_umem_setup:
	xsvc_umem_cookie_free(&cookie);

devmapfail_cookie_alloc:
	kvai = kva;
	for (i = 0; i < npages; i++) {
		hat_unload(kas.a_hat, kvai, PAGESIZE,
		    HAT_UNLOAD_UNLOCK);
		kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
	}
	vmem_free(heap_arena, kva, psize);

	return (err);
}
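/*
 * Illustrative sketch (not in the original source): user space reaches
 * xsvc_devmap() above by mmap()ing the xsvc minor node with a physical
 * address as the file offset, e.g.:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/xsvc", O_RDWR);
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, (off_t)phys_addr);
 *
 * MAP_SHARED is required; xsvc_devmap_map() below rejects MAP_PRIVATE
 * mappings. The device path is an assumption based on the "xsvc" minor
 * node created in xsvc_attach().
 */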
/*
 * xsvc_umem_cookie_alloc()
 *
 *   allocate a umem cookie to be used in devmap_umem_setup using KVA already
 *   allocated.
 */
static int
xsvc_umem_cookie_alloc(caddr_t kva, size_t size, int flags,
    ddi_umem_cookie_t *cookiep)
{
	struct ddi_umem_cookie *umem_cookiep;

	umem_cookiep = kmem_zalloc(sizeof (struct ddi_umem_cookie), flags);
	if (umem_cookiep == NULL) {
		*cookiep = NULL;
		return (-1);
	}

	umem_cookiep->cvaddr = kva;
	umem_cookiep->type = KMEM_NON_PAGEABLE;
	umem_cookiep->size = size;
	*cookiep = (ddi_umem_cookie_t *)umem_cookiep;

	return (0);
}

/*
 * xsvc_umem_cookie_free()
 *
 */
static void
xsvc_umem_cookie_free(ddi_umem_cookie_t *cookiep)
{
	kmem_free(*cookiep, sizeof (struct ddi_umem_cookie));
	*cookiep = NULL;
}
/*
 * xsvc_devmap_map()
 *
 */
/*ARGSUSED*/
static int
xsvc_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags, offset_t off,
    size_t len, void **pvtp)
{
	struct ddi_umem_cookie *cp;
	devmap_handle_t *dhp;
	xsvc_state_t *state;
	int instance;

	instance = getminor(dev);
	state = ddi_get_soft_state(xsvc_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	dhp = (devmap_handle_t *)dhc;
	/* This driver only supports MAP_SHARED, not MAP_PRIVATE */
	if (flags & MAP_PRIVATE) {
		cmn_err(CE_WARN, "!xsvc driver doesn't support MAP_PRIVATE");
		return (EINVAL);
	}

	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt = 1;

	*pvtp = state;
	return (0);
}
/*
 * xsvc_devmap_dup()
 *
 *   keep a reference count for forks so we don't unmap if we have multiple
 *   mappings.
 */
/*ARGSUSED*/
static int
xsvc_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhp,
    void **new_pvtp)
{
	struct ddi_umem_cookie *cp;
	devmap_handle_t *dhp;
	xsvc_state_t *state;

	state = (xsvc_state_t *)pvtp;
	dhp = (devmap_handle_t *)dhc;

	mutex_enter(&state->xs_cookie_mutex);
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (cp == NULL) {
		mutex_exit(&state->xs_cookie_mutex);
		return (ENOMEM);
	}
	cp->cook_refcnt++;
	mutex_exit(&state->xs_cookie_mutex);

	*new_pvtp = state;
	return (0);
}
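/*
 * Worked example (not in the original source): a process mmap()s memory
 * through xsvc_devmap(), so xsvc_devmap_map() above sets cook_refcnt to 1.
 * If the process then fork()s, segdev invokes xsvc_devmap_dup() for the
 * child and cook_refcnt becomes 2. Each unmap then decrements the count in
 * xsvc_devmap_unmap() below; only when it reaches 0 are the KVA mappings
 * and the umem cookie torn down.
 */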
/*
 * xsvc_devmap_unmap()
 *
 *   This routine is only called if we were mapping in memory in
 *   xsvc_devmap(), i.e. we only pass xsvc_callbk to devmap_umem_setup if
 *   pf_is_memory() was true. It would have been nice if devmap_callback_ctl
 *   had an args param; we wouldn't have had to look into the devmap_handle
 *   and into the umem cookie.
 */
/*ARGSUSED*/
static void
xsvc_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	struct ddi_umem_cookie *ncp;
	struct ddi_umem_cookie *cp;
	devmap_handle_t *ndhp;
	devmap_handle_t *dhp;
	xsvc_state_t *state;
	size_t npages;
	caddr_t kvai;
	caddr_t kva;
	size_t size;
	int i;

	state = (xsvc_state_t *)pvtp;
	mutex_enter(&state->xs_cookie_mutex);

	/* peek into the umem cookie to figure out what we need to free up */
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;

	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp1 = state;
	}
	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp2 = state;
	}

	cp->cook_refcnt--;
	if (cp->cook_refcnt == 0) {
		kva = cp->cvaddr;
		size = cp->size;

		/*
		 * free up the umem cookie, then unmap all the pages that we
		 * mapped in during devmap, then free up the kva space.
		 */
		npages = btop(size);
		xsvc_umem_cookie_free(&dhp->dh_cookie);
		kvai = kva;
		for (i = 0; i < npages; i++) {
			hat_unload(kas.a_hat, kvai, PAGESIZE,
			    HAT_UNLOAD_UNLOCK);
			kvai = (caddr_t)((uintptr_t)kvai + PAGESIZE);
		}
		vmem_free(heap_arena, kva, size);
	}

	mutex_exit(&state->xs_cookie_mutex);
}