/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>
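/* Kick the channel off the hardware: writing the channel ID to 0x002634
 * requests a kick-off, and the pending bit (bit 20) clears once the
 * channel's state has been saved out.  A 2ms timeout is treated as fatal.
 */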
static int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_client *client = chan->base.object.client;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, client->name);
		return -EBUSY;
	}

	return 0;
}
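/* Offset within the channel's instance block at which each engine's
 * context pointer lives.  Engines that return 0 have no per-channel
 * context to switch.
 */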
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    :
	case NVKM_ENGINE_CE0   :
	case NVKM_ENGINE_CE1   :
	case NVKM_ENGINE_CE2   : return 0x0000;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	default:
		WARN_ON(1);
		return 0;
	}
}
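/* Detach an engine context: kick the channel off the hardware first,
 * then zero the engine's context pointer in the instance block.
 */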
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret;

	ret = gk104_fifo_gpfifo_kick(chan);
	if (ret && suspend)
		return ret;

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
		nvkm_wo32(inst, offset + 0x04, 0x00000000);
		nvkm_done(inst);
	}

	return ret;
}
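/* Attach an engine context: point the instance block at the engine's
 * context object in the channel's address space.  The low word carries
 * addr | 4, where bit 2 appears to act as a valid flag.
 */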
static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma.offset;
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
		nvkm_done(inst);
	}

	return 0;
}
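/* Tear down the per-engine context mapping and object. */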
static void
gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
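/* Bind an engine context object into the channel: engines with no
 * context area are skipped, otherwise the object is instanced and
 * mapped read/write into the channel's VM.
 */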
static int
gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gk104_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
}
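/* Remove the channel from its runlist and disable it in PFIFO. */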
static void
gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;

	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
}
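/* Program the channel's instance address into PFIFO, then add it to
 * the runlist for its engine (unless the channel has been killed).
 */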
static void
gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
	}
}
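/* Release the channel's VM reference and page directory. */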
static void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
	nvkm_gpuobj_del(&chan->pgd);
	return chan;
}
static const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
	.dtor = gk104_fifo_gpfifo_dtor,
	.init = gk104_fifo_gpfifo_init,
	.fini = gk104_fifo_gpfifo_fini,
	.ntfy = g84_fifo_chan_ntfy,
	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
	.engine_init = gk104_fifo_gpfifo_engine_init,
	.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
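/* Channel constructor: unpack the user's ioctl arguments, pick an
 * engine from the requested mask, allocate the channel, and set up
 * its page directory and RAMFC.
 */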
int
gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *parent = oclass->parent;
	struct gk104_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	u32 engines;
	int ret, i;
	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.vm, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
	} else
		return ret;
	/* determine which downstream engines are present */
	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		u64 subdevs = gk104_fifo_engine_subdev(i);
		if (!nvkm_device_engine(device, __ffs64(subdevs)))
			continue;
		engines |= (1 << i);
	}
	/* if this is an engine mask query, we're done */
	if (!args->v0.engine) {
		args->v0.engine = engines;
		return nvkm_object_new(oclass, NULL, 0, pobject);
	}
	/* check that we support a requested engine - note that the user
	 * argument is a mask in order to allow the user to request (for
	 * example) *any* copy engine, but doesn't matter which.
	 */
	args->v0.engine &= engines;
	if (!args->v0.engine) {
		nvif_ioctl(parent, "no supported engine\n");
		return -ENODEV;
	}
	/* allocate the channel */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	chan->engine = __ffs(args->v0.engine);
	INIT_LIST_HEAD(&chan->head);
	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, args->v0.vm, 0,
				  gk104_fifo_engine_subdev(chan->engine),
				  1, fifo->user.bar.offset, 0x200,
				  oclass, &chan->base);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	/* page directory */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
	if (ret)
		return ret;
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
	nvkm_done(chan->base.inst);
	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
	if (ret)
		return ret;
	/* clear channel control registers */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);
	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
	/* RAMFC */
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}
const struct nvkm_fifo_chan_oclass
gk104_fifo_gpfifo_oclass = {
	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = gk104_fifo_gpfifo_new,
};