// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>
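
/*
 * This file wires nouveau's memory allocators into TTM. Each placement
 * domain (VRAM, GART, and the pre-NV50 "nv04" GART) gets a
 * ttm_mem_type_manager_func table: TTM calls get_node() to allocate
 * backing store when validating a buffer object into that domain, and
 * put_node() to release it again. The helpers directly below are shared
 * by all three tables.
 */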
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	nouveau_mem_del(reg);
}

static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer)
{
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_mem *mem;
	int ret;

	/* No VRAM at all (e.g. SoC designs): nothing to allocate from. */
	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			/* mm_node == NULL tells TTM to retry after eviction */
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	return 0;
}
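
/*
 * Manager for device memory (TTM_PL_VRAM). Allocation is delegated to
 * nouveau_mem_vram() above; init/takedown are no-ops because the real
 * VRAM heap is managed by nvkm rather than by a per-manager structure
 * here.
 */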
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug,
};
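
/*
 * Manager for system memory accessed through the GPU's VMM (TTM_PL_TT).
 * No GPU virtual address is assigned here; reg->start is left at zero
 * and the actual mapping is established later, when the buffer is
 * bound, so get_node() only has to create the nouveau_mem wrapper.
 */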
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	reg->start = 0;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};
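
/*
 * GART manager for pre-NV50 (nv04-class) chips. Unlike the generic
 * manager above, the GPU virtual address range has to be reserved up
 * front via nvif_vmm_get(), and reg->start is derived from the
 * resulting VMA rather than assigned at bind time.
 */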
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	/* Reserve a 4KiB-page VMA covering the whole region up front. */
	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			/* mm_node == NULL tells TTM to retry after eviction */
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};
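
/*
 * mmap() entry point for the device node. Page offsets below
 * DRM_FILE_PAGE_OFFSET belong to the legacy DRM mapping scheme and are
 * forwarded to drm_legacy_mmap(); everything above that boundary is a
 * TTM buffer object handled by ttm_bo_mmap().
 */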
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
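
/*
 * TTM's memory accounting (ttm_mem_global) and BO state (ttm_bo_global)
 * are refcounted singletons shared by every TTM driver in the system.
 * drm_global_item_ref() instantiates an item on first use and only
 * bumps the refcount afterwards; the two wrappers below adapt the TTM
 * init/release functions to the drm_global_reference callback types.
 */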
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	/* Reference (creating if necessary) the global memory accounting. */
	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	/* Then the global BO state, which depends on the memory glob. */
	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	/* A NULL release hook means init never completed; nothing to undo. */
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
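
/*
 * Resolve and cache the host (system) memory types exposed by the MMU:
 * a coherent and a non-coherent variant, each optionally carrying kind
 * bits (used for tiled/compressed layouts on newer GPUs). The !!kind
 * index keeps the plain types in slot 0 and the kind-capable ones in
 * slot 1.
 */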
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
	struct nvif_mmu *mmu = &drm->client.mmu;
	int typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
					    kind | NVIF_MEM_COHERENT);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_host[!!kind] = typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_ncoh[!!kind] = typei;
	return 0;
}
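
/*
 * Main TTM bring-up for a device: resolve host and VRAM memory types,
 * inherit any AGP resources probed by nvkm, initialise the TTM BO
 * device, and register the VRAM and GART ranges with TTM.
 */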
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct drm_device *dev = drm->dev;
	int typei, ret;

	ret = nouveau_ttm_init_host(drm, 0);
	if (ret)
		return ret;

	/* On Tesla and newer (except the original nv50 chipset), also set
	 * up kind-capable host memory types.
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    drm->client.device.info.chipset != 0x50) {
		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
		if (ret)
			return ret;
	}

	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
					   NVIF_MEM_KIND |
					   NVIF_MEM_COMP |
					   NVIF_MEM_DISP);
		if (typei < 0)
			return -ENOSYS;

		drm->ttm.type_vram = typei;
	} else {
		drm->ttm.type_vram = -1;
	}

	/* Inherit any AGP configuration probed by nvkm. */
	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  drm->client.mmu.dmabits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = drm->client.vmm.vmm.limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
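
/*
 * Teardown mirrors nouveau_ttm_init() in reverse: drain and destroy the
 * VRAM and GART ranges, release the BO device and the global items,
 * then drop the write-combined mappings that were set up over the VRAM
 * BAR (PCI resource 1).
 */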
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}