/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>
#define NV41_GART_SIZE (512 * 1024 * 1024)
#define NV41_GART_PAGE (  4 * 1024)
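/* Note (added): a 512 MiB aperture of 4 KiB pages needs
 * NV41_GART_SIZE / NV41_GART_PAGE = 131072 PTEs; at 4 bytes each that
 * is the 512 KiB page table allocated in nv41_mmu_oneinit() below.
 */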
/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
static void
nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = pte * 4;
	nvkm_kmap(pgt);
	while (cnt) {
		/* split each PAGE_SIZE-sized DMA entry into 4 KiB GART pages */
		u32 page = PAGE_SIZE / NV41_GART_PAGE;
		u64 phys = (u64)*list++;
		while (cnt && page--) {
			/* PTE: physical address >> 7, bit 0 = present */
			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
			phys += NV41_GART_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}
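/* Worked example (added): with 4 KiB system pages, each DMA address in
 * "list" fills exactly one PTE; e.g. phys = 0x10000000 is written as
 * (0x10000000 >> 7) | 1 = 0x00200001.
 */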
static void
nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	pte = pte * 4;
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}
static void
nv41_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
	struct nvkm_device *device = mmu->base.subdev.device;

	mutex_lock(&mmu->base.subdev.mutex);
	/* trigger a GART TLB flush, then poll for completion */
	nvkm_wr32(device, 0x100810, 0x00000022);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100810) & 0x00000020)
			break;
	);
	nvkm_wr32(device, 0x100810, 0x00000000);
	mutex_unlock(&mmu->base.subdev.mutex);
}
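/* Note (added): nvkm_msec() re-runs its body until the "break" executes
 * or the 2000ms timeout expires; bit 0x00000020 of 0x100810 presumably
 * reports flush completion before the trigger is cleared again.
 */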
/*******************************************************************************
 * MMU subdev
 ******************************************************************************/
static int
nv41_mmu_oneinit(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	int ret;

	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
			      &mmu->vm->pgt[0].mem[0]);
	mmu->vm->pgt[0].refcount[0] = 1;
	return ret;
}
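/* Note (added, reading of the call above): the boolean argument to
 * nvkm_memory_new() requests zeroed memory, so every PTE starts out
 * invalid; taking a refcount of 1 pins this single page table for the
 * lifetime of the MMU.
 */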
static void
nv41_mmu_init(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
	/* point the GART at the page table and enable it */
	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100820, 0x00000000);
}
static const struct nvkm_mmu_func
nv41_mmu = {
	.dtor = nv04_mmu_dtor,
	.oneinit = nv41_mmu_oneinit,
	.init = nv41_mmu_init,
	.limit = NV41_GART_SIZE,
	.dma_bits = 39,
	.pgt_bits = 32 - 12,
	.spg_shift = 12,
	.lpg_shift = 12,
	.map_sg = nv41_vm_map_sg,
	.unmap = nv41_vm_unmap,
	.flush = nv41_vm_flush,
};
int
nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	/* fall back to the NV04 GART on AGP boards, or when the PCIE GART
	 * has been disabled with the "NvPCIE" config option
	 */
	if (device->type == NVKM_DEVICE_AGP ||
	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
		return nv04_mmu_new(device, index, pmmu);

	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
}