drivers/gpu/drm/nouveau/nvkm/core/engctx.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/engctx.h>
#include <core/engine.h>
#include <core/client.h>

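/* An engine context (engctx) holds the per-channel state an engine needs
 * to execute on behalf of a client.  Each engine tracks its contexts on
 * engine->contexts, protected by engine->lock, and a context is shared by
 * refcount between objects created under the same parent.
 */
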
static inline int
nvkm_engctx_exists(struct nvkm_object *parent,
                   struct nvkm_engine *engine, void **pobject)
{
        struct nvkm_engctx *engctx;
        struct nvkm_object *parctx;

        list_for_each_entry(engctx, &engine->contexts, head) {
                parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
                if (parctx == parent) {
                        atomic_inc(&nv_object(engctx)->refcount);
                        *pobject = engctx;
                        return 1;
                }
        }

        return 0;
}

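/* Find or create the engine context for 'parent'.  Returns 0 when a new
 * context was created, a positive value when an existing context was found
 * and referenced instead, or a negative error code.
 */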
int
nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
                    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
                    u32 size, u32 align, u32 flags, int length, void **pobject)
{
        struct nvkm_client *client = nvkm_client(parent);
        struct nvkm_engine *engine = nv_engine(engobj);
        struct nvkm_object *engctx;
        unsigned long save;
        int ret;

        /* check if this engine already has a context for the parent object,
         * and reference it instead of creating a new one
         */
        spin_lock_irqsave(&engine->lock, save);
        ret = nvkm_engctx_exists(parent, engine, pobject);
        spin_unlock_irqrestore(&engine->lock, save);
        if (ret)
                return ret;

        /* create the new context, supports creating both raw objects and
         * objects backed by instance memory
         */
        if (size) {
                ret = nvkm_gpuobj_create_(parent, engobj, oclass,
                                          NV_ENGCTX_CLASS, pargpu, size,
                                          align, flags, length, pobject);
        } else {
                ret = nvkm_object_create_(parent, engobj, oclass,
                                          NV_ENGCTX_CLASS, length, pobject);
        }

        engctx = *pobject;
        if (ret)
                return ret;

        /* must take the lock again and re-check a context doesn't already
         * exist (in case of a race) - the lock had to be dropped before as
         * it's not possible to allocate the object with it held.
         */
        spin_lock_irqsave(&engine->lock, save);
        ret = nvkm_engctx_exists(parent, engine, pobject);
        if (ret) {
                spin_unlock_irqrestore(&engine->lock, save);
                nvkm_object_ref(NULL, &engctx);
                return ret;
        }

        if (client->vm)
                atomic_inc(&client->vm->engref[nv_engidx(engine)]);
        list_add(&nv_engctx(engctx)->head, &engine->contexts);
        nv_engctx(engctx)->addr = ~0ULL;
        spin_unlock_irqrestore(&engine->lock, save);
        return 0;
}

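/* Tear-down mirrors creation: unmap the context's VMA, unlink it from the
 * engine's context list, drop the client VM's engine reference, and free
 * either the backing gpuobj or the bare object, matching how the context
 * was allocated.
 */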
void
nvkm_engctx_destroy(struct nvkm_engctx *engctx)
{
        struct nvkm_engine *engine = engctx->gpuobj.object.engine;
        struct nvkm_client *client = nvkm_client(engctx);
        unsigned long save;

        nvkm_gpuobj_unmap(&engctx->vma);
        spin_lock_irqsave(&engine->lock, save);
        list_del(&engctx->head);
        spin_unlock_irqrestore(&engine->lock, save);

        if (client->vm)
                atomic_dec(&client->vm->engref[nv_engidx(engine)]);

        if (engctx->gpuobj.size)
                nvkm_gpuobj_destroy(&engctx->gpuobj);
        else
                nvkm_object_destroy(&engctx->gpuobj.object);
}

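/* Initialise the backing gpuobj, then attach the context through the
 * context_attach() hook of the nearest NV_PARENT_CLASS ancestor (if it
 * provides one), serialised by that parent's subdev mutex.
 */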
int
nvkm_engctx_init(struct nvkm_engctx *engctx)
{
        struct nvkm_object *object = nv_object(engctx);
        struct nvkm_subdev *subdev = nv_subdev(object->engine);
        struct nvkm_object *parent;
        struct nvkm_subdev *pardev;
        int ret;

        ret = nvkm_gpuobj_init(&engctx->gpuobj);
        if (ret)
                return ret;

        parent = nv_pclass(object->parent, NV_PARENT_CLASS);
        pardev = nv_subdev(parent->engine);
        if (nv_parent(parent)->context_attach) {
                mutex_lock(&pardev->mutex);
                ret = nv_parent(parent)->context_attach(parent, object);
                mutex_unlock(&pardev->mutex);
        }

        if (ret) {
                nv_error(parent, "failed to attach %s context, %d\n",
                         subdev->name, ret);
                return ret;
        }

        nv_debug(parent, "attached %s context\n", subdev->name);
        return 0;
}

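/* The reverse of init: detach the context via the parent's
 * context_detach() hook (if any) before finalising the backing gpuobj;
 * 'suspend' is passed through to both steps.
 */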
int
nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
{
        struct nvkm_object *object = nv_object(engctx);
        struct nvkm_subdev *subdev = nv_subdev(object->engine);
        struct nvkm_object *parent;
        struct nvkm_subdev *pardev;
        int ret = 0;

        parent = nv_pclass(object->parent, NV_PARENT_CLASS);
        pardev = nv_subdev(parent->engine);
        if (nv_parent(parent)->context_detach) {
                mutex_lock(&pardev->mutex);
                ret = nv_parent(parent)->context_detach(parent, suspend, object);
                mutex_unlock(&pardev->mutex);
        }

        if (ret) {
                nv_error(parent, "failed to detach %s context, %d\n",
                         subdev->name, ret);
                return ret;
        }

        nv_debug(parent, "detached %s context\n", subdev->name);
        return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
}

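/* Default method implementations, for engines that don't need a
 * specialised context class: the default ctor allocates a 256-byte,
 * 256-aligned, zero-filled context.
 */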
int
_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                  struct nvkm_oclass *oclass, void *data, u32 size,
                  struct nvkm_object **pobject)
{
        struct nvkm_engctx *engctx;
        int ret;

        ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
                                 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
        *pobject = nv_object(engctx);
        return ret;
}

void
_nvkm_engctx_dtor(struct nvkm_object *object)
{
        nvkm_engctx_destroy(nv_engctx(object));
}

int
_nvkm_engctx_init(struct nvkm_object *object)
{
        return nvkm_engctx_init(nv_engctx(object));
}

int
_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
{
        return nvkm_engctx_fini(nv_engctx(object), suspend);
}

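/* Look up the context bound at instance address 'addr'.  On a hit,
 * engine->lock is deliberately left held (with the saved IRQ flags
 * stashed in engctx->save) until the object is handed back via
 * nvkm_engctx_put(), so get/put bracket a critical section around any
 * use of the returned context.
 */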
struct nvkm_object *
nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
{
        struct nvkm_engctx *engctx;
        unsigned long flags;

        spin_lock_irqsave(&engine->lock, flags);
        list_for_each_entry(engctx, &engine->contexts, head) {
                if (engctx->addr == addr) {
                        engctx->save = flags;
                        return nv_object(engctx);
                }
        }
        spin_unlock_irqrestore(&engine->lock, flags);
        return NULL;
}

void
nvkm_engctx_put(struct nvkm_object *object)
{
        if (object) {
                struct nvkm_engine *engine = nv_engine(object->engine);
                struct nvkm_engctx *engctx = nv_engctx(object);
                spin_unlock_irqrestore(&engine->lock, engctx->save);
        }
}
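
/* A minimal usage sketch of the get/put pair (hypothetical caller; the
 * 'engine' and 'inst_addr' names stand in for whatever the caller has,
 * e.g. an interrupt handler resolving which channel an engine fault
 * belongs to):
 *
 *      struct nvkm_object *engctx = nvkm_engctx_get(engine, inst_addr);
 *      if (engctx) {
 *              // engine->lock is held here; engctx is safe to use
 *      }
 *      nvkm_engctx_put(engctx);  // NULL-safe; drops the lock otherwise
 */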