/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/if000c.h>

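/* Remove whatever is mapped at @addr by issuing the UNMAP method on the VMM object. */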
int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
	return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
				&(struct nvif_vmm_unmap_v0) { .addr = addr },
				sizeof(struct nvif_vmm_unmap_v0));
}

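/*
 * Map @size bytes of @mem, starting at @offset within it, at virtual address
 * @addr.  Backend-specific arguments are passed through @argv/@argc; the
 * method arguments are built on the stack when they fit, otherwise in a
 * temporary heap allocation.
 */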
int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
	     struct nvif_mem *mem, u64 offset)
{
	struct nvif_vmm_map_v0 *args;
	u8 stack[48];	/* on-stack scratch, used when the arguments fit */
	int ret;

	/* Fall back to a heap allocation when the caller-supplied argument
	 * blob is too large for the on-stack buffer.
	 */
	if (sizeof(*args) + argc > sizeof(stack)) {
		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}

	args->version = 0;
	args->addr = addr;
	args->size = size;
	args->memory = nvif_handle(&mem->object);
	args->offset = offset;
	memcpy(args->data, argv, argc);

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
			       args, sizeof(*args) + argc);
	if (args != (void *)stack)
		kfree(args);
	return ret;
}

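/*
 * Release an address-space allocation previously returned by nvif_vmm_get().
 * A zero vma->size marks the vma as unused, so the call is a no-op for vmas
 * that were never allocated or have already been put.
 */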
void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
	if (vma->size) {
		WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
					 &(struct nvif_vmm_put_v0) {
						.addr = vma->addr,
					 }, sizeof(struct nvif_vmm_put_v0)));
		vma->size = 0;
	}
}

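/*
 * Allocate virtual address space from the VMM and return it in @vma.
 * @type selects the GET flavour (ADDR, PTES or LAZY), @page and @align
 * select the page size index and alignment, and @sparse requests a sparse
 * allocation.  vma->size is left at zero on failure.
 */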
int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
	struct nvif_vmm_get_v0 args;
	int ret;

	args.version = vma->size = 0;
	args.sparse = sparse;
	args.page = page;
	args.align = align;
	args.size = size;

	switch (type) {
	case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
	case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
	case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
			       &args, sizeof(args));
	if (ret == 0) {
		vma->addr = args.addr;
		vma->size = args.size;
	}
	return ret;
}

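/* Destroy the VMM object and free the per-page-size description array. */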
void
nvif_vmm_dtor(struct nvif_vmm *vmm)
{
	kfree(vmm->page);
	nvif_object_dtor(&vmm->object);
}

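/*
 * Create a VMM object on @mmu covering [addr, addr + size), then query the
 * page sizes it supports (NVIF_VMM_V0_PAGE) into the vmm->page[] array.
 */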
int
nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
	      u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
{
	struct nvif_vmm_v0 *args;
	u32 argn = sizeof(*args) + argc;
	int ret = -ENOSYS, i;

	vmm->object.client = NULL;
	vmm->page = NULL;

	if (!(args = kmalloc(argn, GFP_KERNEL)))
		return -ENOMEM;
	args->version = 0;
	args->managed = managed;
	args->addr = addr;
	args->size = size;
	memcpy(args->data, argv, argc);

	ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
			       oclass, args, argn, &vmm->object);
	if (ret)
		goto done;

	vmm->start = args->addr;
	vmm->limit = args->size;

	vmm->page_nr = args->page_nr;
	vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
				  GFP_KERNEL);
	if (!vmm->page) {
		ret = -ENOMEM;
		goto done;
	}

	/* Query the description of each supported page size. */
	for (i = 0; i < vmm->page_nr; i++) {
		struct nvif_vmm_page_v0 args = { .index = i };

		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
				       &args, sizeof(args));
		if (ret)
			break;

		vmm->page[i].shift = args.shift;
		vmm->page[i].sparse = args.sparse;
		vmm->page[i].vram = args.vram;
		vmm->page[i].host = args.host;
		vmm->page[i].comp = args.comp;