// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>
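/*
 * TTM memory-type manager backends for nouveau.  The init/fini/del/debug
 * hooks below are effectively stubs: allocation state lives in the
 * nouveau_mem objects themselves, so the per-manager callbacks only need
 * to create and destroy those objects.
 */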
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	nouveau_mem_del(reg);
}

static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer)
{
}

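/*
 * VRAM allocation.  Note the get_node() contract: returning 0 with
 * reg->mm_node left NULL tells TTM the space simply wasn't available (so
 * it may try eviction), while a negative return aborts the allocation
 * outright.  -ENOSPC from nouveau_mem_vram() is translated into the former.
 */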
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	int ret;

	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	if (ret)
		return ret;

	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug,
};

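/*
 * System-memory (GART) allocation.  Only the nouveau_mem object is created
 * here; reg->start is forced to 0 because the GPU virtual address is
 * assigned later, when the buffer is bound through the per-client VMM.
 */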
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	if (ret)
		return ret;

	reg->start = 0;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

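/*
 * Pre-NV50 GART allocation.  The NV04-style VMM has no per-client address
 * spaces, so a GPU virtual range must be reserved up front with
 * nvif_vmm_get() (PTES, 12 == 4KiB page shift) and reg->start derived from
 * the resulting VMA rather than assigned at bind time.
 */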
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

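/*
 * File-operations mmap hook: every nouveau BO mapping is serviced by TTM,
 * which resolves the fake mmap offset back to a buffer object.
 */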
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

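/*
 * Look up the host (system) memory types TTM will use: a CPU-coherent one
 * and a non-coherent one.  nouveau_ttm_init() calls this twice, once with
 * kind == 0 and once with NVIF_MEM_KIND, and the results are stored
 * indexed by !!kind.
 */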
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
	struct nvif_mmu *mmu = &drm->client.mmu;
	int typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
			      kind | NVIF_MEM_COHERENT);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_host[!!kind] = typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_ncoh[!!kind] = typei;
	return 0;
}

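/*
 * One-time TTM setup: pick memory types, take over any AGP aperture,
 * initialise the bo device, and size the VRAM and GART ranges TTM is
 * allowed to manage.
 */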
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct drm_device *dev = drm->dev;
	int typei, ret;

	ret = nouveau_ttm_init_host(drm, 0);
	if (ret)
		return ret;

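	/*
	 * Tesla and newer can also use kind'd (tiled/compressed) host
	 * memory; the original NV50 (chipset 0x50) is excluded here,
	 * presumably lacking support for it.
	 */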
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    drm->client.device.info.chipset != 0x50) {
		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
		if (ret)
			return ret;
	}

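	/*
	 * Select the VRAM memory type on dGPUs (SoCs have no dedicated
	 * VRAM).  The type is required to be BAR-mappable and to support
	 * kind'd, compressed and displayable allocations.
	 */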
	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
					   NVIF_MEM_KIND |
					   NVIF_MEM_COMP |
					   NVIF_MEM_DISP);
		if (typei < 0)
			return -ENOSYS;

		drm->ttm.type_vram = typei;
	} else {
		drm->ttm.type_vram = -1;
	}

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

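	/*
	 * Initialise the TTM bo device.  The final argument requests DMA32
	 * addressing when the MMU can only express 32 or fewer DMA address
	 * bits.
	 */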
	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 dev->vma_offset_manager,
				 drm->client.mmu.dmabits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	/* Map the VRAM BAR write-combined for speed. */
	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = drm->client.vmm.vmm.limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

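/*
 * Teardown, in reverse order of nouveau_ttm_init(): drain and release the
 * TTM ranges and device, then drop the write-combine mapping and the
 * reserved I/O memtype on the VRAM BAR.
 */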
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}