// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/limits.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>
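/*
 * Common ->free() callback shared by all of nouveau's TTM resource managers:
 * it simply releases the nouveau_mem backing the resource.
 */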
static void
nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
        nouveau_mem_del(reg);
}
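/*
 * Allocate a VRAM resource: create a nouveau_mem for the buffer object and
 * back it with VRAM, honouring the BO's contiguity and page-size constraints.
 */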
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        if (drm->client.device.info.ram_size == 0)
                return -ENOMEM;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
        if (ret) {
                nouveau_mem_del(reg);
                return ret;
        }

        return 0;
}
const struct ttm_resource_manager_func nouveau_vram_manager = {
        .alloc = nouveau_vram_manager_new,
        .free = nouveau_manager_del,
};
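/*
 * Allocate a GART (system memory) resource on NV50+ hardware: only the
 * nouveau_mem book-keeping is created here; the GPU mapping happens later.
 */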
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        reg->start = 0;
        return 0;
}
const struct ttm_resource_manager_func nouveau_gart_manager = {
        .alloc = nouveau_gart_manager_new,
        .free = nouveau_manager_del,
};
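/*
 * Pre-NV50 GART allocation: reserve a range of PTEs in the shared VMM so the
 * resource has a fixed GPU address (reg->start) from the moment it is created.
 */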
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        mem = nouveau_mem(reg);
        if (ret)
                return ret;

        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
                           (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(reg);
                return ret;
        }

        reg->start = mem->vma[0].addr >> PAGE_SHIFT;
        return 0;
}
const struct ttm_resource_manager_func nv04_gart_manager = {
        .alloc = nv04_gart_manager_new,
        .free = nouveau_manager_del,
};
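/*
 * Page-fault handler for mmap'd BOs: reserve the BO, give the driver a chance
 * to move it to a mappable placement, then let TTM insert the pages.  The BO
 * is taken off the io_reserve LRU while the fault is being serviced.
 */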
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        ret = nouveau_ttm_fault_reserve_notify(bo);
        if (ret)
                goto error_unlock;

        nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
        nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

error_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}
static struct vm_operations_struct nouveau_ttm_vm_ops = {
        .fault = nouveau_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};
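/* mmap() entry point: let TTM set up the VMA, then install nouveau's vm_ops. */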
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
        int ret;

        ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
        if (ret)
                return ret;

        vma->vm_ops = &nouveau_ttm_vm_ops;
        return 0;
}
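/*
 * Look up the NVIF memory types used for host (system) memory: one coherent
 * and one non-coherent variant, optionally with a kind (tiling) attribute.
 */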
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
        struct nvif_mmu *mmu = &drm->client.mmu;
        int typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                                   kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_host[!!kind] = typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_ncoh[!!kind] = typei;
        return 0;
}
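/*
 * Set up the TTM_PL_VRAM manager.  Tesla (NV50) and newer use nouveau's own
 * resource manager; older chips fall back to TTM's simple range manager.
 */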
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

                if (!man)
                        return -ENOMEM;

                man->func = &nouveau_vram_manager;

                ttm_resource_manager_init(man,
                                          drm->gem.vram_available >> PAGE_SHIFT);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
                ttm_resource_manager_set_used(man, true);
                return 0;
        } else {
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
                                          drm->gem.vram_available >> PAGE_SHIFT);
        }
}
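/*
 * Tear down the TTM_PL_VRAM manager, evicting anything still resident,
 * mirroring the two code paths of nouveau_ttm_init_vram().
 */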
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
                kfree(man);
        } else {
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
        }
}
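/*
 * Set up the TTM_PL_TT (GART) manager.  AGP on pre-Tesla chips uses TTM's
 * range manager; everything else gets one of nouveau's managers sized from
 * the available GART space.
 */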
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man;
        unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
        const struct ttm_resource_manager_func *func = NULL;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                func = &nouveau_gart_manager;
        else if (!drm->agp.bridge)
                func = &nv04_gart_manager;
        else
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
                                          size_pages);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return -ENOMEM;

        man->func = func;
        man->use_tt = true;
        ttm_resource_manager_init(man, size_pages);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
}
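/*
 * Tear down the TTM_PL_TT manager, matching the manager type that
 * nouveau_ttm_init_gtt() chose for this chipset.
 */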
static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
            drm->agp.bridge) {
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
        } else {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
                kfree(man);
        }
}
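/*
 * One-time TTM setup for the device: probe the host and VRAM memory types,
 * take over any AGP aperture, initialise the TTM BO device and then the
 * VRAM and GART resource managers.
 */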
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
        bool need_swiotlb = false;
        int typei, ret;

        ret = nouveau_ttm_init_host(drm, 0);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            drm->client.device.info.chipset != 0x50) {
                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
                if (ret)
                        return ret;
        }

        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                                           NVIF_MEM_KIND |
                                           NVIF_MEM_COMP |
                                           NVIF_MEM_DISP);
                if (typei < 0)
                        return -ENOSYS;

                drm->ttm.type_vram = typei;
        } else {
                drm->ttm.type_vram = -1;
        }

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        need_swiotlb = !!swiotlb_nr_tbl();
#endif

        ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
                                 drm->dev->dev, dev->anon_inode->i_mapping,
                                 dev->vma_offset_manager, need_swiotlb,
                                 drm->client.mmu.dmabits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->client.device.info.ram_user;

        arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                   device->func->resource_size(device, 1));

        ret = nouveau_ttm_init_vram(drm);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge)
                drm->gem.gart_available = drm->client.vmm.vmm.limit;
        else
                drm->gem.gart_available = drm->agp.size;

        ret = nouveau_ttm_init_gtt(drm);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        mutex_init(&drm->ttm.io_reserve_mutex);
        INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}
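/*
 * Undo nouveau_ttm_init(): drop the resource managers, release the TTM BO
 * device and give back the write-combining mapping of the VRAM BAR.
 */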
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);

        nouveau_ttm_fini_vram(drm);
        nouveau_ttm_fini_gtt(drm);

        ttm_bo_device_release(&drm->ttm.bdev);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                                device->func->resource_size(device, 1));
}