/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

#include <core/tegra.h>
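/* TTM memory-type manager for VRAM: buffer backing store is carved out of
 * board memory through the nvkm RAM allocator. */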
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);

	man->priv = fb;
	return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;

	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
};
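/* TTM memory-type manager for the TT (GART) domain: records the memory type
 * derived from the buffer's tile flags; the actual GPU virtual mapping is
 * handled elsewhere by the bo driver. */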
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}
static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};
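/* Pre-NV50 (nv04-class MMU) GART manager: each allocation takes a range of
 * the nv04 MMU's address space via nvkm_vm_get(). */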
#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;

	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;

	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;

	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}
static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
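/* mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy maps,
 * everything above is routed to TTM. */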
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}
static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
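/* Take references on the TTM global memory-accounting and BO state that is
 * shared across DRM drivers. */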
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}
void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
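/* One-time TTM setup: choose a DMA mask, initialise the bo driver, then
 * create the VRAM and GART memory-type ranges. */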
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use an IOMMU, then the addressable DMA
		 * space is constrained by the IOMMU bit
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);
	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
}