/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include "changk104.h"
27 #include <core/client.h>
28 #include <core/gpuobj.h>
29 #include <subdev/fb.h>
30 #include <subdev/mmu.h>
31 #include <subdev/timer.h>
33 #include <nvif/class.h>
34 #include <nvif/cla06f.h>
35 #include <nvif/unpack.h>
38 gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan
*chan
)
40 struct gk104_fifo
*fifo
= chan
->fifo
;
41 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
42 struct nvkm_device
*device
= subdev
->device
;
43 struct nvkm_client
*client
= chan
->base
.object
.client
;
44 struct nvkm_fifo_cgrp
*cgrp
= chan
->cgrp
;
48 nvkm_wr32(device
, 0x002634, cgrp
->id
| 0x01000000);
50 nvkm_wr32(device
, 0x002634, chan
->base
.chid
);
51 if (nvkm_msec(device
, 2000,
52 if (!(nvkm_rd32(device
, 0x002634) & 0x00100000))
55 nvkm_error(subdev
, "%s %d [%s] kick timeout\n",
56 cgrp
? "tsg" : "channel",
57 cgrp
? cgrp
->id
: chan
->base
.chid
, client
->name
);
58 nvkm_fifo_recover_chan(&fifo
->base
, chan
->base
.chid
);
65 gk104_fifo_gpfifo_kick(struct gk104_fifo_chan
*chan
)
68 mutex_lock(&chan
->base
.fifo
->engine
.subdev
.mutex
);
69 ret
= gk104_fifo_gpfifo_kick_locked(chan
);
70 mutex_unlock(&chan
->base
.fifo
->engine
.subdev
.mutex
);
75 gk104_fifo_gpfifo_engine_addr(struct nvkm_engine
*engine
)
77 switch (engine
->subdev
.index
) {
79 case NVKM_ENGINE_CE0
...NVKM_ENGINE_CE_LAST
:
81 case NVKM_ENGINE_GR
: return 0x0210;
82 case NVKM_ENGINE_SEC
: return 0x0220;
83 case NVKM_ENGINE_MSPDEC
: return 0x0250;
84 case NVKM_ENGINE_MSPPP
: return 0x0260;
85 case NVKM_ENGINE_MSVLD
: return 0x0270;
86 case NVKM_ENGINE_VIC
: return 0x0280;
87 case NVKM_ENGINE_MSENC
: return 0x0290;
88 case NVKM_ENGINE_NVDEC0
: return 0x02100270;
89 case NVKM_ENGINE_NVENC0
: return 0x02100290;
90 case NVKM_ENGINE_NVENC1
: return 0x0210;
98 gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan
*base
,
99 struct nvkm_engine
*engine
, bool suspend
)
101 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
102 struct nvkm_gpuobj
*inst
= chan
->base
.inst
;
103 u32 offset
= gk104_fifo_gpfifo_engine_addr(engine
);
106 ret
= gk104_fifo_gpfifo_kick(chan
);
112 nvkm_wo32(inst
, (offset
& 0xffff) + 0x00, 0x00000000);
113 nvkm_wo32(inst
, (offset
& 0xffff) + 0x04, 0x00000000);
114 if ((offset
>>= 16)) {
115 nvkm_wo32(inst
, offset
+ 0x00, 0x00000000);
116 nvkm_wo32(inst
, offset
+ 0x04, 0x00000000);
125 gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan
*base
,
126 struct nvkm_engine
*engine
)
128 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
129 struct nvkm_gpuobj
*inst
= chan
->base
.inst
;
130 u32 offset
= gk104_fifo_gpfifo_engine_addr(engine
);
133 u64 addr
= chan
->engn
[engine
->subdev
.index
].vma
->addr
;
134 u32 datalo
= lower_32_bits(addr
) | 0x00000004;
135 u32 datahi
= upper_32_bits(addr
);
137 nvkm_wo32(inst
, (offset
& 0xffff) + 0x00, datalo
);
138 nvkm_wo32(inst
, (offset
& 0xffff) + 0x04, datahi
);
139 if ((offset
>>= 16)) {
140 nvkm_wo32(inst
, offset
+ 0x00, datalo
);
141 nvkm_wo32(inst
, offset
+ 0x04, datahi
);
150 gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan
*base
,
151 struct nvkm_engine
*engine
)
153 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
154 nvkm_vmm_put(chan
->base
.vmm
, &chan
->engn
[engine
->subdev
.index
].vma
);
155 nvkm_gpuobj_del(&chan
->engn
[engine
->subdev
.index
].inst
);
159 gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan
*base
,
160 struct nvkm_engine
*engine
,
161 struct nvkm_object
*object
)
163 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
164 int engn
= engine
->subdev
.index
;
167 if (!gk104_fifo_gpfifo_engine_addr(engine
))
170 ret
= nvkm_object_bind(object
, NULL
, 0, &chan
->engn
[engn
].inst
);
174 ret
= nvkm_vmm_get(chan
->base
.vmm
, 12, chan
->engn
[engn
].inst
->size
,
175 &chan
->engn
[engn
].vma
);
179 return nvkm_memory_map(chan
->engn
[engn
].inst
, 0, chan
->base
.vmm
,
180 chan
->engn
[engn
].vma
, NULL
, 0);
184 gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan
*base
)
186 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
187 struct gk104_fifo
*fifo
= chan
->fifo
;
188 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
189 u32 coff
= chan
->base
.chid
* 8;
191 if (!list_empty(&chan
->head
)) {
192 gk104_fifo_runlist_remove(fifo
, chan
);
193 nvkm_mask(device
, 0x800004 + coff
, 0x00000800, 0x00000800);
194 gk104_fifo_gpfifo_kick(chan
);
195 gk104_fifo_runlist_update(fifo
, chan
->runl
);
198 nvkm_wr32(device
, 0x800000 + coff
, 0x00000000);
202 gk104_fifo_gpfifo_init(struct nvkm_fifo_chan
*base
)
204 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
205 struct gk104_fifo
*fifo
= chan
->fifo
;
206 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
207 u32 addr
= chan
->base
.inst
->addr
>> 12;
208 u32 coff
= chan
->base
.chid
* 8;
210 nvkm_mask(device
, 0x800004 + coff
, 0x000f0000, chan
->runl
<< 16);
211 nvkm_wr32(device
, 0x800000 + coff
, 0x80000000 | addr
);
213 if (list_empty(&chan
->head
) && !chan
->killed
) {
214 gk104_fifo_runlist_insert(fifo
, chan
);
215 nvkm_mask(device
, 0x800004 + coff
, 0x00000400, 0x00000400);
216 gk104_fifo_runlist_update(fifo
, chan
->runl
);
217 nvkm_mask(device
, 0x800004 + coff
, 0x00000400, 0x00000400);
222 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan
*base
)
224 struct gk104_fifo_chan
*chan
= gk104_fifo_chan(base
);
225 nvkm_memory_unref(&chan
->mthd
);
230 const struct nvkm_fifo_chan_func
231 gk104_fifo_gpfifo_func
= {
232 .dtor
= gk104_fifo_gpfifo_dtor
,
233 .init
= gk104_fifo_gpfifo_init
,
234 .fini
= gk104_fifo_gpfifo_fini
,
235 .ntfy
= gf100_fifo_chan_ntfy
,
236 .engine_ctor
= gk104_fifo_gpfifo_engine_ctor
,
237 .engine_dtor
= gk104_fifo_gpfifo_engine_dtor
,
238 .engine_init
= gk104_fifo_gpfifo_engine_init
,
239 .engine_fini
= gk104_fifo_gpfifo_engine_fini
,
243 gk104_fifo_gpfifo_new_(struct gk104_fifo
*fifo
, u64
*runlists
, u16
*chid
,
244 u64 vmm
, u64 ioffset
, u64 ilength
, u64
*inst
, bool priv
,
245 const struct nvkm_oclass
*oclass
,
246 struct nvkm_object
**pobject
)
248 struct gk104_fifo_chan
*chan
;
249 int runlist
= ffs(*runlists
) -1, ret
, i
;
254 if (!vmm
|| runlist
< 0 || runlist
>= fifo
->runlist_nr
)
256 *runlists
= BIT_ULL(runlist
);
258 engm
= fifo
->runlist
[runlist
].engm
;
259 for_each_set_bit(i
, &engm
, fifo
->engine_nr
) {
260 if (fifo
->engine
[i
].engine
)
261 subdevs
|= BIT_ULL(fifo
->engine
[i
].engine
->subdev
.index
);
264 if (subdevs
& BIT_ULL(NVKM_ENGINE_GR
))
265 subdevs
|= BIT_ULL(NVKM_ENGINE_SW
);
267 /* Allocate the channel. */
268 if (!(chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
)))
270 *pobject
= &chan
->base
.object
;
272 chan
->runl
= runlist
;
273 INIT_LIST_HEAD(&chan
->head
);
275 ret
= nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func
, &fifo
->base
,
276 0x1000, 0x1000, true, vmm
, 0, subdevs
,
277 1, fifo
->user
.bar
->addr
, 0x200,
278 oclass
, &chan
->base
);
282 *chid
= chan
->base
.chid
;
283 *inst
= chan
->base
.inst
->addr
;
285 /* Hack to support GPUs where even individual channels should be
286 * part of a channel group.
288 if (fifo
->func
->cgrp_force
) {
289 if (!(chan
->cgrp
= kmalloc(sizeof(*chan
->cgrp
), GFP_KERNEL
)))
291 chan
->cgrp
->id
= chan
->base
.chid
;
292 INIT_LIST_HEAD(&chan
->cgrp
->head
);
293 INIT_LIST_HEAD(&chan
->cgrp
->chan
);
294 chan
->cgrp
->chan_nr
= 0;
297 /* Clear channel control registers. */
298 usermem
= chan
->base
.chid
* 0x200;
299 ilength
= order_base_2(ilength
/ 8);
301 nvkm_kmap(fifo
->user
.mem
);
302 for (i
= 0; i
< 0x200; i
+= 4)
303 nvkm_wo32(fifo
->user
.mem
, usermem
+ i
, 0x00000000);
304 nvkm_done(fifo
->user
.mem
);
305 usermem
= nvkm_memory_addr(fifo
->user
.mem
) + usermem
;
308 nvkm_kmap(chan
->base
.inst
);
309 nvkm_wo32(chan
->base
.inst
, 0x08, lower_32_bits(usermem
));
310 nvkm_wo32(chan
->base
.inst
, 0x0c, upper_32_bits(usermem
));
311 nvkm_wo32(chan
->base
.inst
, 0x10, 0x0000face);
312 nvkm_wo32(chan
->base
.inst
, 0x30, 0xfffff902);
313 nvkm_wo32(chan
->base
.inst
, 0x48, lower_32_bits(ioffset
));
314 nvkm_wo32(chan
->base
.inst
, 0x4c, upper_32_bits(ioffset
) |
316 nvkm_wo32(chan
->base
.inst
, 0x84, 0x20400000);
317 nvkm_wo32(chan
->base
.inst
, 0x94, 0x30000001);
318 nvkm_wo32(chan
->base
.inst
, 0x9c, 0x00000100);
319 nvkm_wo32(chan
->base
.inst
, 0xac, 0x0000001f);
320 nvkm_wo32(chan
->base
.inst
, 0xe4, priv
? 0x00000020 : 0x00000000);
321 nvkm_wo32(chan
->base
.inst
, 0xe8, chan
->base
.chid
);
322 nvkm_wo32(chan
->base
.inst
, 0xb8, 0xf8000000);
323 nvkm_wo32(chan
->base
.inst
, 0xf8, 0x10003080); /* 0x002310 */
324 nvkm_wo32(chan
->base
.inst
, 0xfc, 0x10000010); /* 0x002350 */
325 nvkm_done(chan
->base
.inst
);
330 gk104_fifo_gpfifo_new(struct gk104_fifo
*fifo
, const struct nvkm_oclass
*oclass
,
331 void *data
, u32 size
, struct nvkm_object
**pobject
)
333 struct nvkm_object
*parent
= oclass
->parent
;
335 struct kepler_channel_gpfifo_a_v0 v0
;
339 nvif_ioctl(parent
, "create channel gpfifo size %d\n", size
);
340 if (!(ret
= nvif_unpack(ret
, &data
, &size
, args
->v0
, 0, 0, false))) {
341 nvif_ioctl(parent
, "create channel gpfifo vers %d vmm %llx "
342 "ioffset %016llx ilength %08x "
343 "runlist %016llx priv %d\n",
344 args
->v0
.version
, args
->v0
.vmm
, args
->v0
.ioffset
,
345 args
->v0
.ilength
, args
->v0
.runlist
, args
->v0
.priv
);
346 if (args
->v0
.priv
&& !oclass
->client
->super
)
348 return gk104_fifo_gpfifo_new_(fifo
,