/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "changf100.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cl906f.h>
#include <nvif/unpack.h>
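/*
 * Each engine's context pointer lives at a fixed offset inside the
 * channel's instance block; this table maps an engine index to that
 * offset (0 means the engine keeps no context pointer here, e.g. SW).
 */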
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    : return 0;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_CE0   : return 0x0230;
	case NVKM_ENGINE_CE1   : return 0x0240;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	default:
		WARN_ON(1);
		return 0;
	}
}
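/*
 * Tearing an engine context down requires the channel to be "kicked"
 * off the hardware first: the channel ID is written to 0x002634 and the
 * code polls until the register reads back that ID, which is taken here
 * to mean the preemption has completed.
 */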
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret = 0;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		ret = -EBUSY;
		if (suspend)
			return ret;
	}

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
		nvkm_wo32(inst, offset + 0x04, 0x00000000);
		nvkm_done(inst);
	}

	return ret;
}
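/*
 * Once an engine context object exists, point the per-engine slot in
 * the instance block at its GPU virtual address.  The "| 4" OR'd into
 * the low word below looks like a validity/target flag; that reading is
 * an assumption, the hardware meaning is not spelled out in this file.
 */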
static int
gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma.offset;
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
		nvkm_done(inst);
	}

	return 0;
}
static void
gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
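/*
 * Engine context objects are bound into backing memory and then mapped
 * into the channel's own virtual address space, so the address written
 * by engine_init() above is valid from the channel's point of view.
 */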
static int
gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gf100_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
}
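/*
 * Channel teardown: drop the channel from the runlist (unless it was
 * already killed by an error), commit the new runlist, then clear the
 * channel's PFIFO slot.
 */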
static void
gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;

	if (!list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_remove(fifo, chan);
		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
		gf100_fifo_runlist_commit(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
}
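/*
 * Channel bring-up is the mirror image of fini: program the channel's
 * PFIFO slot with the instance block address (0xc0000000 appears to be
 * the enable/valid bits for that slot, judging by the fini path clearing
 * the same register), then add the channel to the runlist.
 */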
static void
gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_insert(fifo, chan);
		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
		gf100_fifo_runlist_commit(fifo);
	}
}
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
	nvkm_gpuobj_del(&chan->pgd);
	return chan;
}
static const struct nvkm_fifo_chan_func
gf100_fifo_gpfifo_func = {
	.dtor = gf100_fifo_gpfifo_dtor,
	.init = gf100_fifo_gpfifo_init,
	.fini = gf100_fifo_gpfifo_fini,
	.ntfy = g84_fifo_chan_ntfy,
	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
	.engine_init = gf100_fifo_gpfifo_engine_init,
	.engine_fini = gf100_fifo_gpfifo_engine_fini,
};
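/*
 * Channel constructor.  The flow below: validate the ioctl arguments,
 * allocate the channel, construct the base channel object with the set
 * of engines this class may target, create a page directory and bind it
 * into the instance block, reference the VM, clear the channel's slice
 * of the user control-register buffer, and finally fill in the RAMFC.
 */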
static int
gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *parent = oclass->parent;
	struct gf100_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret = -ENOSYS, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.vm, args->v0.ioffset,
			   args->v0.ilength);
	} else
		return ret;
	/* allocate channel */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, args->v0.vm, 0,
				  (1ULL << NVKM_ENGINE_CE0) |
				  (1ULL << NVKM_ENGINE_CE1) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MSPDEC) |
				  (1ULL << NVKM_ENGINE_MSPPP) |
				  (1ULL << NVKM_ENGINE_MSVLD) |
				  (1ULL << NVKM_ENGINE_SW),
				  1, fifo->user.bar.offset, 0x1000,
				  oclass, &chan->base);
	if (ret)
		return ret;
	args->v0.chid = chan->base.chid;

	/* page directory */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
	if (ret)
		return ret;

	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
	nvkm_done(chan->base.inst);

	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
	if (ret)
		return ret;
	/* clear channel control registers */

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
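	/*
	 * RAMFC setup.  The offsets written below are fields of the
	 * channel's FIFO context within the instance block; 0x08/0x0c
	 * point at the channel's user control-register page, and the
	 * 0x48/0x4c pair carries the GPFIFO base address with the ring
	 * size (log2 entries, from order_base_2() above) in the high
	 * bits of 0x4c.  The remaining magic values are taken as-is
	 * from the original source.
	 */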
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}
const struct nvkm_fifo_chan_oclass
gf100_fifo_gpfifo_oclass = {
	.base.oclass = FERMI_CHANNEL_GPFIFO,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = gf100_fifo_gpfifo_new,
};
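/*
 * This class is exposed to userspace by listing the oclass in the owning
 * fifo's channel table.  A minimal sketch of how the matching gf100.c is
 * assumed to reference it (members other than .chan are illustrative):
 *
 *	static const struct nvkm_fifo_func
 *	gf100_fifo = {
 *		...
 *		.chan = {
 *			&gf100_fifo_gpfifo_oclass,
 *			NULL
 *		},
 *	};
 */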