/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

26 #include "nouveau_drv.h"
27 #include "nouveau_gem.h"
28 #include "nouveau_mem.h"
29 #include "nouveau_ttm.h"
31 #include <drm/drm_legacy.h>
33 #include <core/tegra.h>
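/* Shared ttm_mem_type_manager_func callbacks.  Allocation state lives in
 * the nouveau_mem attached to each ttm_mem_reg, so the managers keep no
 * per-type state of their own: init/takedown are no-ops and put_node just
 * frees the nouveau_mem.
 */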
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	nouveau_mem_del(reg);
}

static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer)
{
}

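/* VRAM get_node(): create a nouveau_mem for the region, then back it with
 * VRAM pages.  On -ENOSPC the node is dropped and 0 is returned with
 * reg->mm_node left NULL, which TTM takes as "out of space", letting it
 * evict and retry instead of failing the allocation outright.
 */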
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	int ret;

	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug,
};

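/* GART get_node() for chips with a real GPU VMM: only the nouveau_mem
 * bookkeeping is created here.  Address space is assigned later, when the
 * buffer is bound, so reg->start is simply zeroed.
 */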
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	reg->start = 0;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

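/* NV04-family GART get_node(): a range of GART page-table entries is
 * reserved up front with nvif_vmm_get(), and reg->start reflects the
 * resulting GPU virtual address.
 */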
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

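/* File offsets below DRM_FILE_PAGE_OFFSET belong to the legacy DRM map
 * space; everything above it is a TTM buffer-object mapping.
 */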
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

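/* TTM's memory-accounting and BO state are shared across drivers through
 * refcounted drm_global items.  mem_global_ref.release doubles as the
 * "initialised" flag tested by nouveau_ttm_global_release().
 */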
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

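/* Look up the NVIF memory types used for host (system) memory, one
 * coherent and one non-coherent, for both plain (kind == 0) and
 * kind-annotated allocations; hence the !!kind index.
 */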
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
	struct nvif_mmu *mmu = &drm->client.mmu;
	int typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
					    kind | NVIF_MEM_COHERENT);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_host[!!kind] = typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_ncoh[!!kind] = typei;
	return 0;
}

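/* Bring up TTM for this device: pick memory types, mirror any AGP setup
 * done by nvkm, create the BO device, then size and register the VRAM and
 * GART memory managers.
 */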
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct drm_device *dev = drm->dev;
	int typei, ret;

	ret = nouveau_ttm_init_host(drm, 0);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    drm->client.device.info.chipset != 0x50) {
		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
		if (ret)
			return ret;
	}

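	/* Dedicated VRAM on Tesla and newer boards can carry kind, compression
	 * and display annotations, so look up a mappable VRAM memory type with
	 * those capabilities; SoCs and older boards get by without one.
	 */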
	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
					   NVIF_MEM_KIND |
					   NVIF_MEM_COMP |
					   NVIF_MEM_DISP);
		if (typei < 0)
			return -ENOSYS;

		drm->ttm.type_vram = typei;
	} else {
		drm->ttm.type_vram = -1;
	}

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

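	/* need_dma32: if the client MMU can only address 32 bits of DMA,
	 * TTM must keep system-memory allocations below 4GiB.
	 */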
	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  drm->client.mmu.dmabits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = drm->client.vmm.vmm.limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

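/* Teardown mirrors nouveau_ttm_init() in reverse: drain both memory
 * managers, release the BO device and the TTM globals, then drop the
 * write-combine mappings covering the VRAM BAR.
 */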
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}