/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE (  4 * 1024)
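
/* One 128 MiB DMA aperture split into 4 KiB GPU pages: the page table
 * holds a 32-bit PTE per page, preceded by an 8-byte DMA object header
 * (written in nv04_mmu_oneinit()). */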
/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/
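
/* Write PTEs for a scatter-gather list of DMA addresses: each PAGE_SIZE
 * CPU page in "list" is split into NV04_PDMA_PAGE-sized GPU pages, and
 * entries start at 0x08, just past the DMA object header; OR'ing 3 into
 * each entry sets what appear to be the present/valid bits. */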
static void
nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        pte = 0x00008 + (pte * 4);
        nvkm_kmap(pgt);
        while (cnt) {
                u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
                u32 phys = (u32)*list++;
                while (cnt && page--) {
                        nvkm_wo32(pgt, pte, phys | 3);
                        phys += NV04_PDMA_PAGE;
                        pte += 4;
                        cnt -= 1;
                }
        }
        nvkm_done(pgt);
}
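
/* Invalidate "cnt" PTEs starting at index "pte" by zeroing them. */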
static void
nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
        pte = 0x00008 + (pte * 4);
        nvkm_kmap(pgt);
        while (cnt--) {
                nvkm_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }
        nvkm_done(pgt);
}
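
/* Intentionally empty; this MMU has nothing to flush after PTE updates. */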
static void
nv04_vm_flush(struct nvkm_vm *vm)
{
}
/*******************************************************************************
 * MMU subdev
 ******************************************************************************/
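
/* One-time setup: create the VM spanning the aperture, allocate the lone
 * page table, and write the DMA object header that describes it (flags
 * word, then the aperture limit). */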
static int
nv04_mmu_oneinit(struct nvkm_mmu *base)
{
        struct nv04_mmu *mmu = nv04_mmu(base);
        struct nvkm_device *device = mmu->base.subdev.device;
        struct nvkm_memory *dma;
        int ret;
        ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
                             &mmu->vm);
        if (ret)
                return ret;
        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
                              16, true, &dma);
        mmu->vm->pgt[0].mem[0] = dma;
        mmu->vm->pgt[0].refcount[0] = 1;
        if (ret)
                return ret;
        nvkm_kmap(dma);
        nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
        nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
        nvkm_done(dma);
        return 0;
}
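
/* Teardown: drop the page table and VM reference, then free the null
 * page if chip-specific code allocated one. */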
void *
nv04_mmu_dtor(struct nvkm_mmu *base)
{
        struct nv04_mmu *mmu = nv04_mmu(base);
        struct nvkm_device *device = mmu->base.subdev.device;
        if (mmu->vm) {
                nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &mmu->vm, NULL);
        }
        if (mmu->nullp) {
                dma_free_coherent(device->dev, 16 * 1024,
                                  mmu->nullp, mmu->null);
        }
        return mmu;
}
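
/* Shared constructor for the NV04 MMU family: allocate the wrapper,
 * publish the base object through *pmmu, then initialise it. */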
int
nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu **pmmu)
{
        struct nv04_mmu *mmu;
        if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
                return -ENOMEM;
        *pmmu = &mmu->base;
        nvkm_mmu_ctor(func, device, index, &mmu->base);
        return 0;
}
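
/* Function table for the plain NV04 MMU: 4 KiB pages only, one page
 * table covering the whole NV04_PDMA_SIZE aperture. */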
const struct nvkm_mmu_func
nv04_mmu = {
        .oneinit = nv04_mmu_oneinit,
        .dtor = nv04_mmu_dtor,
        .limit = NV04_PDMA_SIZE,
        .dma_bits = 32,
        .pgt_bits = 32 - 12,
        .spg_shift = 12,
        .lpg_shift = 12,
        .map_sg = nv04_vm_map_sg,
        .unmap = nv04_vm_unmap,
        .flush = nv04_vm_flush,
};
int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
        return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
}