/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

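/*
 * TTM glue for nouveau: memory-type managers for VRAM, the GPU-VM backed
 * GART and the NV04-class GART, the mmap() entry point, and the TTM
 * global/device init and teardown used by the rest of the driver.
 */
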
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_fb *pfb = nvxx_fb(&drm->device);
        man->priv = pfb;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        man->priv = NULL;
        return 0;
}

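/*
 * Drop any GPU virtual-address mappings still attached to a memory node
 * before it is freed; shared by the VRAM and GART del paths below.
 */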
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
        if (node->vma[0].node) {
                nvkm_vm_unmap(&node->vma[0]);
                nvkm_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nvkm_vm_unmap(&node->vma[1]);
                nvkm_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_fb *pfb = nvxx_fb(&drm->device);
        nvkm_mem_node_cleanup(mem->mm_node);
        pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_fb *pfb = nvxx_fb(&drm->device);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
                            mem->page_alignment << PAGE_SHIFT, size_nc,
                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nvkm_fb *pfb = man->priv;
        struct nvkm_mm *mm = &pfb->vram;
        struct nvkm_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&nv_subdev(pfb)->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&nv_subdev(pfb)->mutex);

        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

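/*
 * The positional initializers in the manager tables below are expected to
 * match the hook order of this kernel's struct ttm_mem_type_manager_func:
 * init, takedown, get_node, put_node, debug.  The tables are not
 * registered here; presumably the bo driver's init_mem_type() hook
 * (nouveau_bo.c) installs them per placement, e.g.
 *
 *      man->func = &nouveau_vram_manager;      for TTM_PL_VRAM
 *      man->func = &nouveau_gart_manager;      for TTM_PL_TT with a GPU VM
 *      man->func = &nv04_gart_manager;         for TTM_PL_TT on NV04-class MMUs
 */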
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nvkm_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_mem *node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        switch (drm->device.info.family) {
        case NV_DEVICE_INFO_V0_TESLA:
                if (drm->device.info.chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
                break;
        }

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};

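/*
 * NV04-class GART: these managers work on the single VM owned by the nv04
 * MMU implementation, which is why its private header is pulled in
 * directly here.
 */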
#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
        struct nv04_mmu_priv *priv = (void *)mmu;
        struct nvkm_vm *vm = NULL;
        nvkm_vm_ref(priv->vm, &vm, NULL);
        man->priv = vm;
        return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nvkm_vm *vm = man->priv;
        nvkm_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nvkm_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nvkm_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_mem_reg *mem)
{
        struct nvkm_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                          NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start   = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        nv04_gart_manager_init,
        nv04_gart_manager_fini,
        nv04_gart_manager_new,
        nv04_gart_manager_del,
        nv04_gart_manager_debug
};

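/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET are routed to the
 * legacy DRM mapping code, everything else is handed to TTM.
 */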
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_legacy_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

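/*
 * Take driver references on the two TTM globals: the memory accounting
 * object first, then the BO global state that depends on it.  On failure
 * mem_global_ref.release is left NULL so nouveau_ttm_global_release()
 * knows there is nothing to undo.
 */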
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}

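/*
 * Device-level TTM bring-up: pick a DMA mask from the MMU's dma_bits
 * (falling back to 32 bits for AGP or when the PCI layer rejects the
 * larger mask), initialise the TTM bo device, create the VRAM and GART
 * memory regions, and request write-combining on PCI resource 1 (the
 * VRAM aperture).
 */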
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct drm_device *dev = drm->dev;
        u32 bits;
        int ret;

        bits = nvxx_mmu(&drm->device)->dma_bits;
        if (nv_device_is_pci(nvxx_device(&drm->device))) {
                if (drm->agp.stat == ENABLED ||
                    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
                        bits = 32;

                ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
                if (ret)
                        return ret;

                ret = pci_set_consistent_dma_mask(dev->pdev,
                                                  DMA_BIT_MASK(bits));
                if (ret)
                        pci_set_consistent_dma_mask(dev->pdev,
                                                    DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                 drm->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
                                 bits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->device.info.ram_user;

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                             drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
                                         nv_device_resource_len(nvxx_device(&drm->device), 1));

        /* GART init */
        if (drm->agp.stat != ENABLED) {
                drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                             drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

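/*
 * Teardown: drop the VRAM and GART regions, release the TTM bo device and
 * globals, and remove the write-combine mapping added in nouveau_ttm_init().
 */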
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        mutex_lock(&drm->dev->struct_mutex);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
        mutex_unlock(&drm->dev->struct_mutex);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        arch_phys_wc_del(drm->ttm.mtrr);
}