/* drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c */

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/gpuobj.h>
#include <core/engine.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

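/* Common gpuobj teardown: optionally zero the object's contents on free,
 * release any suballocation made from the parent gpuobj's heap, tear down
 * this object's own heap if one was created, then destroy the base object.
 */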
void
nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
{
	int i;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	if (gpuobj->node)
		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);

	if (gpuobj->heap.block_size)
		nvkm_mm_fini(&gpuobj->heap);

	nvkm_object_destroy(&gpuobj->object);
}

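/* Common gpuobj constructor.  Two backing paths exist: with a pargpu, the
 * object is suballocated from the nearest parent gpuobj exposing a heap;
 * without one, fresh backing memory is allocated from instmem and, if the
 * BAR subdev provides an alloc hook, re-parented onto a BAR allocation.
 */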
int
nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 pclass,
		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
		    int length, void **pobject)
{
	struct nvkm_instmem *imem = nvkm_instmem(parent);
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nvkm_gpuobj *gpuobj;
	struct nvkm_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr = nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		atomic_inc(&parent->refcount);
	} else {
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		if (bar && bar->alloc) {
			struct nvkm_instobj *iobj = (void *)parent;
			struct nvkm_mem **mem = (void *)(iobj + 1);
			struct nvkm_mem *node = *mem;
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				nvkm_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nvkm_object_create_(parent, engine, oclass, pclass |
				  NV_GPUOBJ_CLASS, length, pobject);
	nvkm_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
				   &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}

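/* Argument bundle marshalled through nvkm_object_ctor() by
 * nvkm_gpuobj_new() below.
 */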
struct nvkm_gpuobj_class {
	struct nvkm_object *pargpu;
	u64 size;
	u32 align;
	u32 flags;
};

static int
_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_gpuobj_class *args = data;
	struct nvkm_gpuobj *object;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
				 args->size, args->align, args->flags,
				 &object);
	*pobject = nv_object(object);
	if (ret)
		return ret;

	return 0;
}

void
_nvkm_gpuobj_dtor(struct nvkm_object *object)
{
	nvkm_gpuobj_destroy(nv_gpuobj(object));
}

int
_nvkm_gpuobj_init(struct nvkm_object *object)
{
	return nvkm_gpuobj_init(nv_gpuobj(object));
}

int
_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
{
	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
}

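/* rd32/wr32 forward to the parent object's ofuncs, translating the offset
 * first when this gpuobj is a suballocation of its parent.
 */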
u32
_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	return pfuncs->rd32(gpuobj->parent, addr);
}

void
_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	pfuncs->wr32(gpuobj->parent, addr, data);
}

static struct nvkm_oclass
_nvkm_gpuobj_oclass = {
	.handle = 0x00000000,
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_gpuobj_ctor,
		.dtor = _nvkm_gpuobj_dtor,
		.init = _nvkm_gpuobj_init,
		.fini = _nvkm_gpuobj_fini,
		.rd32 = _nvkm_gpuobj_rd32,
		.wr32 = _nvkm_gpuobj_wr32,
	},
};

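/* Public constructor.  An illustrative call (the size/align/flags values
 * here are made up for the example):
 *
 *	struct nvkm_gpuobj *obj;
 *	int ret = nvkm_gpuobj_new(parent, NULL, 0x1000, 0x100,
 *				  NVOBJ_FLAG_ZERO_ALLOC, &obj);
 */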
int
nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
		u32 size, u32 align, u32 flags,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_object *engine = parent;
	struct nvkm_gpuobj_class args = {
		.pargpu = pargpu,
		.size = size,
		.align = align,
		.flags = flags,
	};

	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
		engine = &engine->engine->subdev.object;
	BUG_ON(engine == NULL);

	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
				&args, sizeof(args),
				(struct nvkm_object **)pgpuobj);
}

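/* Map the gpuobj's backing memory through the BAR subdev's umap hook;
 * returns -EINVAL when no such hook is available.
 */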
int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
{
	struct nvkm_bar *bar = nvkm_bar(gpuobj);
	int ret = -EINVAL;

	if (bar && bar->umap) {
		struct nvkm_instobj *iobj = (void *)
			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
		struct nvkm_mem **mem = (void *)(iobj + 1);
		ret = bar->umap(bar, *mem, access, vma);
	}

	return ret;
}

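/* Map the gpuobj's backing memory into the given address space using
 * 4KiB (1 << 12) pages.
 */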
int
nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
		   u32 access, struct nvkm_vma *vma)
{
	struct nvkm_instobj *iobj = (void *)
		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nvkm_mem **mem = (void *)(iobj + 1);
	int ret;

	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, *mem);
	return 0;
}

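/* Undo either of the mappings above; a no-op for an unused vma. */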
void
nvkm_gpuobj_unmap(struct nvkm_vma *vma)
{
	if (vma->node) {
		nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
	}
}

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */

static void
nvkm_gpudup_dtor(struct nvkm_object *object)
{
	struct nvkm_gpuobj *gpuobj = (void *)object;
	nvkm_object_ref(NULL, &gpuobj->parent);
	nvkm_object_destroy(&gpuobj->object);
}

static struct nvkm_oclass
nvkm_gpudup_oclass = {
	.handle = NV_GPUOBJ_CLASS,
	.ofuncs = &(struct nvkm_ofuncs) {
		.dtor = nvkm_gpudup_dtor,
		.init = nvkm_object_init,
		.fini = nvkm_object_fini,
	},
};

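/* Create a lightweight alias of 'base': the duplicate takes a reference on
 * base and copies its addr/size, but owns no backing memory of its own.
 */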
int
nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
		struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_gpuobj *gpuobj;
	int ret;

	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
				 &nvkm_gpudup_oclass, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret)
		return ret;

	nvkm_object_ref(nv_object(base), &gpuobj->parent);
	gpuobj->addr = base->addr;
	gpuobj->size = base->size;
	return 0;
}