/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <core/gpuobj.h>
25 #include <core/engine.h>
27 #include <subdev/instmem.h>
28 #include <subdev/bar.h>
29 #include <subdev/mmu.h>
31 /* fast-path, where backend is able to provide direct pointer to memory */
33 nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj
*gpuobj
, u32 offset
)
35 return ioread32_native(gpuobj
->map
+ offset
);
39 nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj
*gpuobj
, u32 offset
, u32 data
)
41 iowrite32_native(data
, gpuobj
->map
+ offset
);
44 /* accessor functions for gpuobjs allocated directly from instmem */
46 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj
*gpuobj
, u32 offset
)
48 return nvkm_ro32(gpuobj
->memory
, offset
);
52 nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj
*gpuobj
, u32 offset
, u32 data
)
54 nvkm_wo32(gpuobj
->memory
, offset
, data
);
57 static const struct nvkm_gpuobj_func nvkm_gpuobj_heap
;
59 nvkm_gpuobj_heap_release(struct nvkm_gpuobj
*gpuobj
)
61 gpuobj
->func
= &nvkm_gpuobj_heap
;
62 nvkm_done(gpuobj
->memory
);
65 static const struct nvkm_gpuobj_func
66 nvkm_gpuobj_heap_fast
= {
67 .release
= nvkm_gpuobj_heap_release
,
68 .rd32
= nvkm_gpuobj_rd32_fast
,
69 .wr32
= nvkm_gpuobj_wr32_fast
,
72 static const struct nvkm_gpuobj_func
73 nvkm_gpuobj_heap_slow
= {
74 .release
= nvkm_gpuobj_heap_release
,
75 .rd32
= nvkm_gpuobj_heap_rd32
,
76 .wr32
= nvkm_gpuobj_heap_wr32
,
80 nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj
*gpuobj
)
82 gpuobj
->map
= nvkm_kmap(gpuobj
->memory
);
83 if (likely(gpuobj
->map
))
84 gpuobj
->func
= &nvkm_gpuobj_heap_fast
;
86 gpuobj
->func
= &nvkm_gpuobj_heap_slow
;
90 static const struct nvkm_gpuobj_func
92 .acquire
= nvkm_gpuobj_heap_acquire
,
95 /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
97 nvkm_gpuobj_rd32(struct nvkm_gpuobj
*gpuobj
, u32 offset
)
99 return nvkm_ro32(gpuobj
->parent
, gpuobj
->node
->offset
+ offset
);
103 nvkm_gpuobj_wr32(struct nvkm_gpuobj
*gpuobj
, u32 offset
, u32 data
)
105 nvkm_wo32(gpuobj
->parent
, gpuobj
->node
->offset
+ offset
, data
);
108 static const struct nvkm_gpuobj_func nvkm_gpuobj_func
;
110 nvkm_gpuobj_release(struct nvkm_gpuobj
*gpuobj
)
112 gpuobj
->func
= &nvkm_gpuobj_func
;
113 nvkm_done(gpuobj
->parent
);
116 static const struct nvkm_gpuobj_func
118 .release
= nvkm_gpuobj_release
,
119 .rd32
= nvkm_gpuobj_rd32_fast
,
120 .wr32
= nvkm_gpuobj_wr32_fast
,
123 static const struct nvkm_gpuobj_func
125 .release
= nvkm_gpuobj_release
,
126 .rd32
= nvkm_gpuobj_rd32
,
127 .wr32
= nvkm_gpuobj_wr32
,
131 nvkm_gpuobj_acquire(struct nvkm_gpuobj
*gpuobj
)
133 gpuobj
->map
= nvkm_kmap(gpuobj
->parent
);
134 if (likely(gpuobj
->map
)) {
135 gpuobj
->map
= (u8
*)gpuobj
->map
+ gpuobj
->node
->offset
;
136 gpuobj
->func
= &nvkm_gpuobj_fast
;
138 gpuobj
->func
= &nvkm_gpuobj_slow
;
143 static const struct nvkm_gpuobj_func
145 .acquire
= nvkm_gpuobj_acquire
,
149 nvkm_gpuobj_ctor(struct nvkm_device
*device
, u32 size
, int align
, bool zero
,
150 struct nvkm_gpuobj
*parent
, struct nvkm_gpuobj
*gpuobj
)
157 ret
= nvkm_mm_head(&parent
->heap
, 0, 1, size
, size
,
158 max(align
, 1), &gpuobj
->node
);
160 ret
= nvkm_mm_tail(&parent
->heap
, 0, 1, size
, size
,
161 -align
, &gpuobj
->node
);
166 gpuobj
->parent
= parent
;
167 gpuobj
->func
= &nvkm_gpuobj_func
;
168 gpuobj
->addr
= parent
->addr
+ gpuobj
->node
->offset
;
169 gpuobj
->size
= gpuobj
->node
->length
;
173 for (offset
= 0; offset
< gpuobj
->size
; offset
+= 4)
174 nvkm_wo32(gpuobj
, offset
, 0x00000000);
178 ret
= nvkm_memory_new(device
, NVKM_MEM_TARGET_INST
, size
,
179 abs(align
), zero
, &gpuobj
->memory
);
183 gpuobj
->func
= &nvkm_gpuobj_heap
;
184 gpuobj
->addr
= nvkm_memory_addr(gpuobj
->memory
);
185 gpuobj
->size
= nvkm_memory_size(gpuobj
->memory
);
188 return nvkm_mm_init(&gpuobj
->heap
, 0, gpuobj
->size
, 1);
192 nvkm_gpuobj_del(struct nvkm_gpuobj
**pgpuobj
)
194 struct nvkm_gpuobj
*gpuobj
= *pgpuobj
;
197 nvkm_mm_free(&gpuobj
->parent
->heap
, &gpuobj
->node
);
198 nvkm_mm_fini(&gpuobj
->heap
);
199 nvkm_memory_del(&gpuobj
->memory
);
206 nvkm_gpuobj_new(struct nvkm_device
*device
, u32 size
, int align
, bool zero
,
207 struct nvkm_gpuobj
*parent
, struct nvkm_gpuobj
**pgpuobj
)
209 struct nvkm_gpuobj
*gpuobj
;
212 if (!(gpuobj
= *pgpuobj
= kzalloc(sizeof(*gpuobj
), GFP_KERNEL
)))
215 ret
= nvkm_gpuobj_ctor(device
, size
, align
, zero
, parent
, gpuobj
);
217 nvkm_gpuobj_del(pgpuobj
);
222 nvkm_gpuobj_map(struct nvkm_gpuobj
*gpuobj
, struct nvkm_vm
*vm
,
223 u32 access
, struct nvkm_vma
*vma
)
225 struct nvkm_memory
*memory
= gpuobj
->memory
;
226 int ret
= nvkm_vm_get(vm
, gpuobj
->size
, 12, access
, vma
);
228 nvkm_memory_map(memory
, vma
, 0);
233 nvkm_gpuobj_unmap(struct nvkm_vma
*vma
)
/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */
247 nvkm_gpuobj_wrap(struct nvkm_memory
*memory
, struct nvkm_gpuobj
**pgpuobj
)
249 if (!(*pgpuobj
= kzalloc(sizeof(**pgpuobj
), GFP_KERNEL
)))
252 (*pgpuobj
)->addr
= nvkm_memory_addr(memory
);
253 (*pgpuobj
)->size
= nvkm_memory_size(memory
);