/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
#include "mem.h"

#include <core/memory.h>

#include <nvif/if000a.h>
#include <nvif/unpack.h>

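/*
 * Backing state for a host-memory object: the pages are either allocated
 * and DMA-mapped locally (mem/dma arrays), or described by a caller-supplied
 * DMA-address array or scatterlist passed in via the nvif arguments.
 */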
struct nvkm_mem {
	struct nvkm_memory memory;
	enum nvkm_memory_target target;
	struct nvkm_mmu *mmu;
	u64 pages;
	struct page **mem;
	union {
		struct scatterlist *sgl;
		dma_addr_t *dma;
	};
};

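/* nvkm_memory_func accessors shared by the DMA-address and scatterlist
 * backends. */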
static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->target;
}

static u8
nvkm_mem_page(struct nvkm_memory *memory)
{
	return PAGE_SHIFT;
}

static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	/* Only a single locally-allocated page has a meaningful linear
	 * bus address. */
	if (mem->pages == 1 && mem->mem)
		return mem->dma[0];
	return ~0ULL;
}

static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
	return nvkm_mem(memory)->pages << PAGE_SHIFT;
}

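/* Map the object into a VMM using the array of DMA addresses. */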
static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.dma = mem->dma,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

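/*
 * Destructor: pages allocated by nvkm_mem_new_host() are unmapped and freed;
 * caller-supplied DMA/SG storage (mem->mem == NULL) is left untouched.
 */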
static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->mem) {
		while (mem->pages--) {
			dma_unmap_page(mem->mmu->subdev.device->dev,
				       mem->dma[mem->pages], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(mem->mem[mem->pages]);
		}
		kvfree(mem->dma);
		kvfree(mem->mem);
	}
	return mem;
}

static const struct nvkm_memory_func
nvkm_mem_dma = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_dma,
};

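/* Map the object into a VMM using the caller-provided scatterlist. */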
static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		 struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	struct nvkm_vmm_map map = {
		.memory = &mem->memory,
		.offset = offset,
		.sgl = mem->sgl,
	};
	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

static const struct nvkm_memory_func
nvkm_mem_sgl = {
	.dtor = nvkm_mem_dtor,
	.target = nvkm_mem_target,
	.page = nvkm_mem_page,
	.addr = nvkm_mem_addr,
	.size = nvkm_mem_size,
	.map = nvkm_mem_map_sgl,
};

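/* Kernel-virtual mapping of the backing pages; only possible for memory
 * that was allocated by nvkm_mem_new_host() itself. */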
int
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
{
	struct nvkm_mem *mem = nvkm_mem(memory);
	if (mem->mem) {
		*pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
		return *pmap ? 0 : -EFAULT;
	}
	return -EINVAL;
}

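/*
 * Create a host-memory object.  The nvif argument blob selects between
 * wrapping storage the caller already owns (v0: DMA-address array or
 * scatterlist) and allocating/DMA-mapping pages here (vn).
 */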
static int
nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct device *dev = mmu->subdev.device->dev;
	union {
		struct nvif_mem_ram_vn vn;
		struct nvif_mem_ram_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	enum nvkm_memory_target target;
	struct nvkm_mem *mem;
	gfp_t gfp = GFP_USER | __GFP_ZERO;

	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))
		target = NVKM_MEM_TARGET_HOST;
	else
		target = NVKM_MEM_TARGET_NCOH;

	if (page != PAGE_SHIFT)
		return -EINVAL;

	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
		return -ENOMEM;
	mem->target = target;
	mem->mmu = mmu;
	*pmemory = &mem->memory;

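	/* v0 arguments: wrap storage the caller has already allocated. */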
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if (args->v0.dma) {
			nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
			mem->dma = args->v0.dma;
		} else {
			nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
			mem->sgl = args->v0.sgl;
		}

		if (!IS_ALIGNED(size, PAGE_SIZE))
			return -EINVAL;
		mem->pages = size >> PAGE_SHIFT;
		return 0;
	} else
	if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		kfree(mem);
		return ret;
	}

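	/* vn arguments (no explicit storage): allocate and DMA-map the
	 * pages here. */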
	nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
	size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
		return -ENOMEM;
	if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
		return -ENOMEM;

	if (mmu->dma_bits > 32)
		gfp |= GFP_HIGHUSER;
	else
		gfp |= GFP_DMA32;

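	/* Allocate, zero and DMA-map one page at a time, recording both the
	 * struct page and its bus address. */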
	for (mem->pages = 0; size; size--, mem->pages++) {
		struct page *p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
						    p, 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, mem->dma[mem->pages])) {
			__free_page(p);
			return -ENOMEM;
		}

		mem->mem[mem->pages] = p;
	}

	return 0;
}

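/*
 * Common entry point for memory-object allocation: VRAM types are delegated
 * to the MMU backend, everything else is handled by nvkm_mem_new_host().
 */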
int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
		  void *argv, u32 argc, struct nvkm_memory **pmemory)
{
	struct nvkm_memory *memory = NULL;
	int ret;

	if (mmu->type[type].type & NVKM_MEM_VRAM) {
		ret = mmu->func->mem.vram(mmu, type, page, size,
					  argv, argc, &memory);
	} else {
		ret = nvkm_mem_new_host(mmu, type, page, size,
					argv, argc, &memory);
	}

	if (ret)
		nvkm_memory_unref(&memory);
	*pmemory = memory;
	return ret;
}