/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cla06f.h>
#include <nvif/unpack.h>
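
/* Preempt ("kick") a channel off the hardware: the channel ID is written
 * to register 0x002634, then we poll up to 2ms for bit 20 to clear, which
 * appears to signal completion of the preempt (an interpretation of the
 * code below, not something this file states).  On timeout the channel is
 * handed to recovery.
 */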
int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_client *client = chan->base.object.client;
	int ret = 0;

	mutex_lock(&subdev->mutex);
	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, client->name);
		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
		ret = -ETIMEDOUT;
	}
	mutex_unlock(&subdev->mutex);
	return ret;
}
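
/* Instance-block offset of the context pointer(s) for a given engine.
 * Values wider than 16 bits pack two offsets (e.g. 0x02100270 for NVDEC);
 * callers split them with "& 0xffff" and ">> 16" and write both locations.
 * A return of 0 means the engine has no context pointer to maintain here.
 */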
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    :
	case NVKM_ENGINE_CE0   :
	case NVKM_ENGINE_CE1   :
	case NVKM_ENGINE_CE2   : return 0x0000;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_SEC   : return 0x0220;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	case NVKM_ENGINE_VIC   : return 0x0280;
	case NVKM_ENGINE_MSENC : return 0x0290;
	case NVKM_ENGINE_NVDEC : return 0x02100270;
	case NVKM_ENGINE_NVENC0: return 0x02100290;
	case NVKM_ENGINE_NVENC1: return 0x0210;
	default:
		WARN_ON(1);
		return 0;
	}
}
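
/* Disable an engine's context: kick the channel off the hardware, then
 * zero the engine's context pointer(s) in the instance block (both
 * locations in the packed two-offset case).
 */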
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
	int ret;

	ret = gk104_fifo_gpfifo_kick(chan);
	if (ret && suspend)
		return ret;

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, 0x00000000);
			nvkm_wo32(inst, offset + 0x04, 0x00000000);
		}
		nvkm_done(inst);
	}

	return ret;
}
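
/* Enable an engine's context: write the GPU virtual address of the
 * engine's context into the instance block.  Setting bit 2 in the low
 * word presumably marks the pointer valid; that reading is an assumption,
 * as the bit is not named anywhere in this file.
 */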
static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma->addr;
		u32 datalo = lower_32_bits(addr) | 0x00000004;
		u32 datahi = upper_32_bits(addr);
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, datalo);
			nvkm_wo32(inst, offset + 0x04, datahi);
		}
		nvkm_done(inst);
	}

	return 0;
}
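
/* Undo gk104_fifo_gpfifo_engine_ctor(): release the engine context's
 * VMA and delete its instance object.
 */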
static void
gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
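
/* Construct per-engine context state: bind the engine object to obtain a
 * backing instance, reserve GPU virtual address space for it (page shift
 * 12, i.e. 4KiB pages), and map it into the channel's VMM.
 */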
static int
gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gk104_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
			   &chan->engn[engn].vma);
	if (ret)
		return ret;

	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
			       chan->engn[engn].vma, NULL, 0);
}
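
/* Channel teardown: if the channel is still resident on a runlist, remove
 * it, set what appears to be a disable bit (0x800) in its control word,
 * kick it off the hardware, and commit the updated runlist, before
 * clearing the channel's slot entirely.
 */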
static void
gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;

	if (!list_empty(&chan->head)) {
		gk104_fifo_runlist_remove(fifo, chan);
		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
		gk104_fifo_gpfifo_kick(chan);
		gk104_fifo_runlist_commit(fifo, chan->runl);
	}

	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
}
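
/* Channel setup: each channel owns an 8-byte control slot at
 * 0x800000 + chid * 8.  The runlist index goes in bits 19:16, and the
 * instance block address (in 4KiB units) is written along with the
 * 0x80000000 enable bit.  The 0x400 bit is set both before and after the
 * runlist commit; this mirrors the original code and looks like a
 * (re)enable around the update.
 */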
static void
gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		gk104_fifo_runlist_insert(fifo, chan);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
		gk104_fifo_runlist_commit(fifo, chan->runl);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
	}
}
static void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	return gk104_fifo_chan(base);
}

static const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
	.dtor = gk104_fifo_gpfifo_dtor,
	.init = gk104_fifo_gpfifo_init,
	.fini = gk104_fifo_gpfifo_fini,
	.ntfy = gf100_fifo_chan_ntfy,
	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
	.engine_init = gk104_fifo_gpfifo_engine_init,
	.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
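
/* Translation entry from an NVA06F class-interface engine mask (what
 * userspace requests) to the corresponding NVKM subdev bits (what the
 * driver actually has).
 */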
struct gk104_fifo_chan_func {
	u32 engine;
	u64 subdev;
};
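
/* Common constructor for KEPLER_CHANNEL_GPFIFO_A channels: resolve the
 * requested engine mask to a runlist, allocate and construct the channel,
 * zero its 0x200-byte USERD control area, and fill in the RAMFC portion
 * of the instance block.  The unexplained constants written to RAMFC are
 * initial hardware state inherited from this file's lineage; only the
 * USERD address, ib offset/length and chid are derived values.
 */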
static int
gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
		       struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
		       u64 vmm, u64 ioffset, u64 ilength,
		       const struct nvkm_oclass *oclass,
		       struct nvkm_object **pobject)
{
	struct gk104_fifo_chan *chan;
	int runlist = -1, ret = -ENOSYS, i, j;
	u32 engines = 0, present = 0;
	u64 subdevs = 0;
	u64 usermem;

	/* Determine which downstream engines are present */
	for (i = 0; i < fifo->engine_nr; i++) {
		struct nvkm_engine *engine = fifo->engine[i].engine;
		if (engine) {
			u64 submask = BIT_ULL(engine->subdev.index);
			for (j = 0; func[j].subdev; j++) {
				if (func[j].subdev & submask) {
					present |= func[j].engine;
					break;
				}
			}

			if (!func[j].subdev)
				continue;

			if (runlist < 0 && (*engmask & present))
				runlist = fifo->engine[i].runl;
			if (runlist == fifo->engine[i].runl) {
				engines |= func[j].engine;
				subdevs |= func[j].subdev;
			}
		}
	}

	/* Just an engine mask query?  All done here! */
	if (!*engmask) {
		*engmask = present;
		return nvkm_object_new(oclass, NULL, 0, pobject);
	}

	/* No runlist?  No supported engines. */
	*engmask = present;
	if (runlist < 0)
		return -ENODEV;
	*engmask = engines;

	/* Allocate the channel. */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	chan->runl = runlist;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, vmm, 0, subdevs,
				  1, fifo->user.bar->addr, 0x200,
				  oclass, &chan->base);
	if (ret)
		return ret;

	*chid = chan->base.chid;

	/* Clear channel control registers. */
	usermem = chan->base.chid * 0x200;
	ilength = order_base_2(ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* RAMFC */
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}
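
/* Engine-mask translation table, terminated by the empty entry that the
 * func[j].subdev loop in gk104_fifo_gpfifo_new_() relies on.
 */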
static const struct gk104_fifo_chan_func
gk104_fifo_gpfifo[] = {
	{ NVA06F_V0_ENGINE_SW | NVA06F_V0_ENGINE_GR,
	  BIT_ULL(NVKM_ENGINE_SW) | BIT_ULL(NVKM_ENGINE_GR)
	},
	{ NVA06F_V0_ENGINE_SEC   , BIT_ULL(NVKM_ENGINE_SEC   ) },
	{ NVA06F_V0_ENGINE_MSVLD , BIT_ULL(NVKM_ENGINE_MSVLD ) },
	{ NVA06F_V0_ENGINE_MSPDEC, BIT_ULL(NVKM_ENGINE_MSPDEC) },
	{ NVA06F_V0_ENGINE_MSPPP , BIT_ULL(NVKM_ENGINE_MSPPP ) },
	{ NVA06F_V0_ENGINE_MSENC , BIT_ULL(NVKM_ENGINE_MSENC ) },
	{ NVA06F_V0_ENGINE_VIC   , BIT_ULL(NVKM_ENGINE_VIC   ) },
	{ NVA06F_V0_ENGINE_NVDEC , BIT_ULL(NVKM_ENGINE_NVDEC ) },
	{ NVA06F_V0_ENGINE_NVENC0, BIT_ULL(NVKM_ENGINE_NVENC0) },
	{ NVA06F_V0_ENGINE_NVENC1, BIT_ULL(NVKM_ENGINE_NVENC1) },
	{ NVA06F_V0_ENGINE_CE0   , BIT_ULL(NVKM_ENGINE_CE0   ) },
	{ NVA06F_V0_ENGINE_CE1   , BIT_ULL(NVKM_ENGINE_CE1   ) },
	{ NVA06F_V0_ENGINE_CE2   , BIT_ULL(NVKM_ENGINE_CE2   ) },
	{}
};
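
/* Object ctor: unpack the kepler_channel_gpfifo_a_v0 arguments supplied
 * by userspace and hand off to the common constructor above.
 */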
static int
gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct gk104_fifo *fifo = gk104_fifo(base);
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.engines);
		return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
					      &args->v0.engines,
					      &args->v0.chid,
					       args->v0.vmm,
					       args->v0.ioffset,
					       args->v0.ilength,
					      oclass, pobject);
	}

	return ret;
}

const struct nvkm_fifo_chan_oclass
gk104_fifo_gpfifo_oclass = {
	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = gk104_fifo_gpfifo_new,
};