/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>

#define NV44_GART_SIZE (512 * 1024 * 1024)
#define NV44_GART_PAGE (  4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
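
/* The NV44 GART packs four 27-bit page frame numbers (DMA address >> 12)
 * into each naturally-aligned group of four 32-bit page-table words, so
 * consecutive entries straddle word boundaries.  nv44_vm_fill() performs a
 * read-modify-write of one such group, letting a partial update (fewer than
 * four entries, or an unaligned start) preserve its neighbours; entries with
 * no backing page are pointed at the dummy page instead.
 */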
static void
nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
             dma_addr_t *list, u32 pte, u32 cnt)
{
        u32 base = (pte << 2) & ~0x0000000f;
        u32 tmp[4];

        tmp[0] = nvkm_ro32(pgt, base + 0x0);
        tmp[1] = nvkm_ro32(pgt, base + 0x4);
        tmp[2] = nvkm_ro32(pgt, base + 0x8);
        tmp[3] = nvkm_ro32(pgt, base + 0xc);

        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (null >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        nvkm_wo32(pgt, base + 0x0, tmp[0]);
        nvkm_wo32(pgt, base + 0x4, tmp[1]);
        nvkm_wo32(pgt, base + 0x8, tmp[2]);
        nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}
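
/* Write a run of page-table entries from a list of DMA page addresses.
 * An unaligned head and any short tail are routed through nv44_vm_fill()
 * so that neighbouring entries survive; full groups of four are packed and
 * written directly, with the last word of each group tagged valid
 * (0x40000000).
 */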
static void
nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
        u32 tmp[4];
        int i;

        nvkm_kmap(pgt);
        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_vm_fill(pgt, mmu->null, list, pte, part);
                pte  += part;
                list += part;
                cnt  -= part;
        }

        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
                nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
                nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
                nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
                cnt -= 4;
        }

        if (cnt)
                nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
        nvkm_done(pgt);
}
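
/* Clear a run of page-table entries.  Partial groups are rewritten through
 * nv44_vm_fill() with a NULL list, which points the affected entries back
 * at the dummy page; whole groups of four words are simply zeroed.
 */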
static void
nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
        struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);

        nvkm_kmap(pgt);
        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
                pte += part;
                cnt -= part;
        }

        while (cnt >= 4) {
                nvkm_wo32(pgt, pte++ * 4, 0x00000000);
                nvkm_wo32(pgt, pte++ * 4, 0x00000000);
                nvkm_wo32(pgt, pte++ * 4, 0x00000000);
                nvkm_wo32(pgt, pte++ * 4, 0x00000000);
                cnt -= 4;
        }

        if (cnt)
                nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
        nvkm_done(pgt);
}
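
/* Flush the GART TLB after page-table updates: 0x100814 is loaded with the
 * last GART page address and 0x100808 is used to trigger the flush, polled
 * (for up to 2ms) until the hardware signals completion.
 */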
static void
nv44_vm_flush(struct nvkm_vm *vm)
{
        struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
        struct nvkm_device *device = mmu->base.subdev.device;
        nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
        nvkm_wr32(device, 0x100808, 0x00000020);
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100808) & 0x00000001)
                        break;
        );
        nvkm_wr32(device, 0x100808, 0x00000000);
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/
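
/* One-time setup: allocate the dummy pages that unmapped GART entries
 * point at, create the 512MiB GART address space, and allocate the page
 * table backing it (4 bytes per 4KiB GART page) in instance memory.
 */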
static int
nv44_mmu_oneinit(struct nvkm_mmu *base)
{
        struct nv04_mmu *mmu = nv04_mmu(base);
        struct nvkm_device *device = mmu->base.subdev.device;
        int ret;

        mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
                                        &mmu->null, GFP_KERNEL);
        if (!mmu->nullp) {
                nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
                mmu->null = 0;
        }

        ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
                             &mmu->vm);
        if (ret)
                return ret;

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
                              512 * 1024, true,
                              &mmu->vm->pgt[0].mem[0]);
        mmu->vm->pgt[0].refcount[0] = 1;
        return ret;
}
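
/* Program the GART hardware at init time: the page table's VRAM address is
 * derived relative to the PRAMIN block reported by 0x10020c (see the
 * comment in the function body), then the dummy page, GART size and page
 * table address are written out.
 */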
static void
nv44_mmu_init(struct nvkm_mmu *base)
{
        struct nv04_mmu *mmu = nv04_mmu(base);
        struct nvkm_device *device = mmu->base.subdev.device;
        struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
        u32 addr;

        /* calculate vram address of this PRAMIN block, object must be
         * allocated on 512KiB alignment, and not exceed a total size
         * of 512KiB for this to work correctly
         */
        addr  = nvkm_rd32(device, 0x10020c);
        addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;

        nvkm_wr32(device, 0x100850, 0x80000000);
        nvkm_wr32(device, 0x100818, mmu->null);
        nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
        nvkm_wr32(device, 0x100850, 0x00008000);
        nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
        nvkm_wr32(device, 0x100820, 0x00000000);
        nvkm_wr32(device, 0x10082c, 0x00000001);
        nvkm_wr32(device, 0x100800, addr | 0x00000010);
}
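
/* MMU function table hooking the NV44 GART callbacks into the subdev. */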
static const struct nvkm_mmu_func
nv44_mmu = {
        .dtor = nv04_mmu_dtor,
        .oneinit = nv44_mmu_oneinit,
        .init = nv44_mmu_init,
        .limit = NV44_GART_SIZE,
        .map_sg = nv44_vm_map_sg,
        .unmap = nv44_vm_unmap,
        .flush = nv44_vm_flush,
};
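
/* Constructor: on AGP boards, or when the "NvPCIE" config option disables
 * the PCIe GART, fall back to the nv04 MMU implementation; otherwise use
 * the nv44 function table defined above.
 */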
int
nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
        if (device->type == NVKM_DEVICE_AGP ||
            !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
                return nv04_mmu_new(device, index, pmmu);

        return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
}