/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

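/* Every object created on a channel is wrapped in an oproxy, so that its
 * init/fini/dtor paths can be intercepted to manage the per-engine channel
 * context (see nvkm_fifo_chan_child_init/fini/del below).
 */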
struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

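/* Suspend/stop a child object: once the last active user of the engine
 * context on this channel goes idle, detach the engine from the channel.
 */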
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret = 0;

	/* Only detach once the last user of the engine context goes idle. */
	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			if (suspend)
				return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

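/* Counterpart to the above: attach the engine context to the channel when
 * its first user is initialised.
 */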
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret;

	/* The engine only needs attaching for its first active user. */
	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

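/* Destroy a child object, dropping its reference on the per-engine context
 * and tearing the context down once the last object using it is gone.
 */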
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.index]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

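/* Construct a child object on the channel.  The first object created for a
 * given engine also creates that engine's channel context (cclass), which
 * then becomes the parent of the new object.
 */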
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.index]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

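/* Enumerate the object classes available on this channel: walk the
 * channel's engine mask and expose each engine's sclass list, routing
 * construction through nvkm_fifo_chan_child_new.
 */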
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_engine *engine;
	u64 mask = chan->engines;
	int ret, i, c;

	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
		if (!(engine = nvkm_device_engine(device, i)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

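/* Forward event queries to the chipset-specific implementation, if any. */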
static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

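/* Describe the channel's user register window so a client can map it. */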
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

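/* Kernel-side accessors for the channel's user registers.  The mapping is
 * created lazily on first access and torn down with the channel.
 */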
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	*data = ioread32_native(chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	iowrite32_native(data, chan->user + addr);
	return 0;
}

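/* Channel init/fini simply defer to the chipset-specific hooks. */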
static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

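/* Tear down a channel: release its channel id, user register mapping,
 * address space and instance memory, in the reverse order of construction.
 */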
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->user)
		iounmap(chan->user);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.rd32 = nvkm_fifo_chan_rd32,
	.wr32 = nvkm_fifo_chan_wr32,
	.sclass = nvkm_fifo_chan_child_get,
};

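/* Common channel constructor, called by the chipset-specific channel
 * implementations.  Allocates instance memory, binds the push buffer
 * ctxdma, joins the channel address space, and claims a channel id.
 */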
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u64 engines, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engines = engines;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;

	nvkm_fifo_cevent(fifo);
	return 0;
}
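
/*
 * Illustrative only, not part of the driver: a chipset backend would embed
 * struct nvkm_fifo_chan in its own channel structure and call
 * nvkm_fifo_chan_ctor() from its constructor, roughly as sketched below.
 * All "my_" names and the numeric parameters (instance size/alignment, BAR
 * index, register window base/stride) are hypothetical.
 *
 *	static int
 *	my_chan_new(struct nvkm_fifo *fifo, u64 vmm, u64 push,
 *		    const struct nvkm_oclass *oclass, struct my_chan *chan)
 *	{
 *		return nvkm_fifo_chan_ctor(&my_chan_func, fifo,
 *					   0x1000, 0x1000, true, vmm, push,
 *					   (1ULL << NVKM_ENGINE_GR),
 *					   0, 0x800000, 0x200, oclass,
 *					   &chan->base);
 *	}
 */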