/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <core/engctx.h>
25 #include <core/engine.h>
26 #include <core/client.h>
29 nvkm_engctx_exists(struct nvkm_object
*parent
,
30 struct nvkm_engine
*engine
, void **pobject
)
32 struct nvkm_engctx
*engctx
;
33 struct nvkm_object
*parctx
;
35 list_for_each_entry(engctx
, &engine
->contexts
, head
) {
36 parctx
= nv_pclass(nv_object(engctx
), NV_PARENT_CLASS
);
37 if (parctx
== parent
) {
38 atomic_inc(&nv_object(engctx
)->refcount
);
48 nvkm_engctx_create_(struct nvkm_object
*parent
, struct nvkm_object
*engobj
,
49 struct nvkm_oclass
*oclass
, struct nvkm_object
*pargpu
,
50 u32 size
, u32 align
, u32 flags
, int length
, void **pobject
)
52 struct nvkm_client
*client
= nvkm_client(parent
);
53 struct nvkm_engine
*engine
= nv_engine(engobj
);
54 struct nvkm_object
*engctx
;
58 /* check if this engine already has a context for the parent object,
59 * and reference it instead of creating a new one
61 spin_lock_irqsave(&engine
->lock
, save
);
62 ret
= nvkm_engctx_exists(parent
, engine
, pobject
);
63 spin_unlock_irqrestore(&engine
->lock
, save
);
67 /* create the new context, supports creating both raw objects and
68 * objects backed by instance memory
71 ret
= nvkm_gpuobj_create_(parent
, engobj
, oclass
,
72 NV_ENGCTX_CLASS
, pargpu
, size
,
73 align
, flags
, length
, pobject
);
75 ret
= nvkm_object_create_(parent
, engobj
, oclass
,
76 NV_ENGCTX_CLASS
, length
, pobject
);
83 /* must take the lock again and re-check a context doesn't already
84 * exist (in case of a race) - the lock had to be dropped before as
85 * it's not possible to allocate the object with it held.
87 spin_lock_irqsave(&engine
->lock
, save
);
88 ret
= nvkm_engctx_exists(parent
, engine
, pobject
);
90 spin_unlock_irqrestore(&engine
->lock
, save
);
91 nvkm_object_ref(NULL
, &engctx
);
96 atomic_inc(&client
->vm
->engref
[nv_engidx(engine
)]);
97 list_add(&nv_engctx(engctx
)->head
, &engine
->contexts
);
98 nv_engctx(engctx
)->addr
= ~0ULL;
99 spin_unlock_irqrestore(&engine
->lock
, save
);
104 nvkm_engctx_destroy(struct nvkm_engctx
*engctx
)
106 struct nvkm_engine
*engine
= engctx
->gpuobj
.object
.engine
;
107 struct nvkm_client
*client
= nvkm_client(engctx
);
110 nvkm_gpuobj_unmap(&engctx
->vma
);
111 spin_lock_irqsave(&engine
->lock
, save
);
112 list_del(&engctx
->head
);
113 spin_unlock_irqrestore(&engine
->lock
, save
);
116 atomic_dec(&client
->vm
->engref
[nv_engidx(engine
)]);
118 if (engctx
->gpuobj
.size
)
119 nvkm_gpuobj_destroy(&engctx
->gpuobj
);
121 nvkm_object_destroy(&engctx
->gpuobj
.object
);
125 nvkm_engctx_init(struct nvkm_engctx
*engctx
)
127 struct nvkm_object
*object
= nv_object(engctx
);
128 struct nvkm_subdev
*subdev
= nv_subdev(object
->engine
);
129 struct nvkm_object
*parent
;
130 struct nvkm_subdev
*pardev
;
133 ret
= nvkm_gpuobj_init(&engctx
->gpuobj
);
137 parent
= nv_pclass(object
->parent
, NV_PARENT_CLASS
);
138 pardev
= nv_subdev(parent
->engine
);
139 if (nv_parent(parent
)->context_attach
) {
140 mutex_lock(&pardev
->mutex
);
141 ret
= nv_parent(parent
)->context_attach(parent
, object
);
142 mutex_unlock(&pardev
->mutex
);
146 nv_error(parent
, "failed to attach %s context, %d\n",
151 nv_debug(parent
, "attached %s context\n", subdev
->name
);
156 nvkm_engctx_fini(struct nvkm_engctx
*engctx
, bool suspend
)
158 struct nvkm_object
*object
= nv_object(engctx
);
159 struct nvkm_subdev
*subdev
= nv_subdev(object
->engine
);
160 struct nvkm_object
*parent
;
161 struct nvkm_subdev
*pardev
;
164 parent
= nv_pclass(object
->parent
, NV_PARENT_CLASS
);
165 pardev
= nv_subdev(parent
->engine
);
166 if (nv_parent(parent
)->context_detach
) {
167 mutex_lock(&pardev
->mutex
);
168 ret
= nv_parent(parent
)->context_detach(parent
, suspend
, object
);
169 mutex_unlock(&pardev
->mutex
);
173 nv_error(parent
, "failed to detach %s context, %d\n",
178 nv_debug(parent
, "detached %s context\n", subdev
->name
);
179 return nvkm_gpuobj_fini(&engctx
->gpuobj
, suspend
);
183 _nvkm_engctx_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
184 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
185 struct nvkm_object
**pobject
)
187 struct nvkm_engctx
*engctx
;
190 ret
= nvkm_engctx_create(parent
, engine
, oclass
, NULL
, 256, 256,
191 NVOBJ_FLAG_ZERO_ALLOC
, &engctx
);
192 *pobject
= nv_object(engctx
);
/* Default object-class destructor: forwards to nvkm_engctx_destroy(). */
void
_nvkm_engctx_dtor(struct nvkm_object *object)
{
	nvkm_engctx_destroy(nv_engctx(object));
}
/* Default object-class init: forwards to nvkm_engctx_init(). */
int
_nvkm_engctx_init(struct nvkm_object *object)
{
	return nvkm_engctx_init(nv_engctx(object));
}
209 _nvkm_engctx_fini(struct nvkm_object
*object
, bool suspend
)
211 return nvkm_engctx_fini(nv_engctx(object
), suspend
);
215 nvkm_engctx_get(struct nvkm_engine
*engine
, u64 addr
)
217 struct nvkm_engctx
*engctx
;
220 spin_lock_irqsave(&engine
->lock
, flags
);
221 list_for_each_entry(engctx
, &engine
->contexts
, head
) {
222 if (engctx
->addr
== addr
) {
223 engctx
->save
= flags
;
224 return nv_object(engctx
);
227 spin_unlock_irqrestore(&engine
->lock
, flags
);
232 nvkm_engctx_put(struct nvkm_object
*object
)
235 struct nvkm_engine
*engine
= nv_engine(object
->engine
);
236 struct nvkm_engctx
*engctx
= nv_engctx(object
);
237 spin_unlock_irqrestore(&engine
->lock
, engctx
->save
);