/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
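
/* Resolve a client's object handle to the nvkm_vmm wrapped by its uvmm object. */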
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
        struct nvkm_object *object;

        object = nvkm_object_search(client, handle, &nvkm_uvmm);
        if (IS_ERR(object))
                return (void *)object;

        return nvkm_uvmm(object)->vmm;
}
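
/* NVIF_VMM_V0_PFNCLR: tear down PFN mappings over a range of the VMM. */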
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!client->super)
                return -ENOENT;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}
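
/* NVIF_VMM_V0_PFNMAP: map an array of physical PFNs into a range of the VMM. */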
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size, *phys;
        u8 page;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                page = args->v0.page;
                addr = args->v0.addr;
                size = args->v0.size;
                phys = args->v0.phys;
                if (argc != (size >> page) * sizeof(args->v0.phys[0]))
                        return -EINVAL;
        } else
                return ret;

        if (!client->super)
                return -ENOENT;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}
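
/* NVIF_VMM_V0_UNMAP: unmap the memory bound to an existing VMA. */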
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx",
                          addr, vma ? vma->addr : ~0ULL);
                goto done;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto done;
        }

        if (ret = -EINVAL, !vma->memory) {
                VMM_DEBUG(vmm, "unmapped");
                goto done;
        }

        nvkm_vmm_unmap_locked(vmm, vma, false);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}
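
/* NVIF_VMM_V0_MAP: bind an nvkm_memory object (looked up by handle) to a VMA,
 * splitting the VMA first if only part of it is being mapped.
 */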
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_map_v0 v0;
        } *args = argv;
        u64 addr, size, handle, offset;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        struct nvkm_memory *memory;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                addr = args->v0.addr;
                size = args->v0.size;
                handle = args->v0.memory;
                offset = args->v0.offset;
        } else
                return ret;

        memory = nvkm_umem_search(client, handle);
        if (IS_ERR(memory)) {
                VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
                return PTR_ERR(memory);
        }

        mutex_lock(&vmm->mutex);
        if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                VMM_DEBUG(vmm, "lookup %016llx", addr);
                goto fail;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto fail;
        }

        if (ret = -EINVAL, vma->mapped && !vma->memory) {
                VMM_DEBUG(vmm, "pfnmap %016llx", addr);
                goto fail;
        }

        if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
                if (addr + size > vma->addr + vma->size || vma->memory ||
                    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
                        VMM_DEBUG(vmm, "split %d %d %d "
                                       "%016llx %016llx %016llx %016llx",
                                  !!vma->memory, vma->refd, vma->mapref,
                                  addr, size, vma->addr, (u64)vma->size);
                        goto fail;
                }

                vma = nvkm_vmm_node_split(vmm, vma, addr, size);
                if (!vma) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }
        vma->busy = true;
        mutex_unlock(&vmm->mutex);

        ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
        if (ret == 0) {
                /* Successful map will clear vma->busy. */
                nvkm_memory_unref(&memory);
                return 0;
        }

        mutex_lock(&vmm->mutex);
        vma->busy = false;
        nvkm_vmm_unmap_region(vmm, vma);
fail:
        mutex_unlock(&vmm->mutex);
        nvkm_memory_unref(&memory);
        return ret;
}
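
/* NVIF_VMM_V0_PUT: release address space previously allocated with GET. */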
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, args->v0.addr);
        if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
                          vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
                goto done;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto done;
        }

        nvkm_vmm_put_locked(vmm, vma);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}
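
/* NVIF_VMM_V0_GET: allocate address space (and, for PTES requests, page
 * table backing) from the VMM.
 */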
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        bool getref, mapref, sparse;
        u8 page, align;
        u64 size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
                mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
                sparse = args->v0.sparse;
                page = args->v0.page;
                align = args->v0.align;
                size = args->v0.size;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                  page, align, size, &vma);
        mutex_unlock(&vmm->mutex);
        if (ret)
                return ret;

        args->v0.addr = vma->addr;
        vma->user = !client->super;
        return ret;
}
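
/* NVIF_VMM_V0_PAGE: query the properties of a supported page size by index. */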
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_page_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        int ret = -ENOSYS;
        u8 type, index, nr;

        page = uvmm->vmm->func->page;
        for (nr = 0; page[nr].shift; nr++);

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if ((index = args->v0.index) >= nr)
                        return -EINVAL;
                type = page[index].type;
                args->v0.shift = page[index].shift;
                args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
                args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
                args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
                args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
        } else
                return -ENOSYS;

        return 0;
}
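
/* Method dispatcher for the userspace VMM object. */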
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        switch (mthd) {
        case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
        case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
        case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
        case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
        case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
        case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
        case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
        case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
                if (uvmm->vmm->func->mthd) {
                        return uvmm->vmm->func->mthd(uvmm->vmm,
                                                     uvmm->object.client,
                                                     mthd, argv, argc);
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}
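
/* Object destructor: drop the reference held on the wrapped VMM. */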
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        nvkm_vmm_unref(&uvmm->vmm);
        return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
        .dtor = nvkm_uvmm_dtor,
        .mthd = nvkm_uvmm_mthd,
};
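
/* Constructor: create a userspace-visible VMM object, either wrapping a new
 * per-client VMM or taking a reference on the MMU's existing VMM.
 */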
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
              struct nvkm_object **pobject)
{
        struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
        const bool more = oclass->base.maxver >= 0;
        union {
                struct nvif_vmm_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        struct nvkm_uvmm *uvmm;
        int ret = -ENOSYS;
        u64 addr, size;
        bool managed;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
                managed = args->v0.managed != 0;
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
        *pobject = &uvmm->object;

        if (!mmu->vmm) {
                ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
                                          NULL, "user", &uvmm->vmm);
                if (ret)
                        return ret;

                uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
        } else {
                if (size)
                        return -EINVAL;

                uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
        }

        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
                args->v0.page_nr++;
        args->v0.addr = uvmm->vmm->start;
        args->v0.size = uvmm->vmm->limit;
        return 0;
}