/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include "nouveau_drm.h"
28 #include "nouveau_ttm.h"
29 #include "nouveau_gem.h"
31 #include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

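/* Unmap and release any GPU virtual-address ranges still attached to
 * a memory node before its backing storage goes away. */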
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

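/* Hand a VRAM allocation back to the ram backend. */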
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	nvkm_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}

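/* Allocate VRAM for a buffer object.  NONCONTIG placements may be
 * satisfied with scattered blocks no smaller than the bo's page size;
 * the memtype comes from the object's tile flags.  Running out of
 * space is reported to TTM as success with a NULL mm_node, which makes
 * it attempt eviction or another placement instead of failing outright. */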
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

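/* Dump every node in the VRAM mm, then totals, to the kernel log. */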
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *pfb = man->priv;
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

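/* Positional initializers, matching ttm_mem_type_manager_func's
 * ordering: init, takedown, get_node, put_node, debug. */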
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

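/* TTM memory-type manager for the GART aperture on chipsets with a
 * full MMU.  No address space is carved out here; pages are mapped
 * into the GPU's VM later, when the buffer is bound, so new/del only
 * manage the nvkm_mem node and its memtype. */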
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

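/* Pre-NV50 (nv04-class) GART: a single VM owned by the nv04 MMU, from
 * which aperture space is reserved up front with nvkm_vm_get(). */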
#include <subdev/mmu/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};

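/* Offsets below DRM_FILE_PAGE_OFFSET belong to the legacy DRM mapping
 * ranges; everything above maps a TTM buffer object. */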
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

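/* TTM's memory accounting and bo state are shared between drivers, so
 * they're taken and dropped through DRM global references. */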
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

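/* Bring up TTM for the device: pick a DMA mask from the MMU's
 * capabilities (capped to 32 bits for AGP, or when the host can't do
 * better), register the globals and bo device, create the VRAM and
 * GART regions, and put a write-combine MTRR over the BAR1 aperture. */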
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvxx_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
					 nv_device_resource_len(nvxx_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

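/* Tear everything down again, in roughly the reverse order of
 * nouveau_ttm_init(). */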
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
}