/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_fifo.h"

#include "nve0_graph.h"
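
/* Dump the status words of one CTXCTL (context-switching falcon) unit.
 * What the "done" and "stat" registers below actually encode comes from
 * reverse engineering, so this output is purely a debugging aid.
 */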
static void
nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
        NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
                nv_rd32(dev, base + 0x400));
        NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
                nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
                nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
        NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
                nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
                nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
static void
nve0_graph_ctxctl_debug(struct drm_device *dev)
{
        u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
        u32 gpc;

        nve0_graph_ctxctl_debug_unit(dev, 0x409000);
        for (gpc = 0; gpc < gpcnr; gpc++)
                nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
}
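
/* Point the HUB context-switch falcon at a channel's instance block
 * (0x409500) and issue command 0x03 via 0x409504, which appears to
 * bind/load that channel's graphics context; bit 4 of 0x409800 acks
 * completion.  Interpretation of these registers is reverse-engineered.
 */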
static int
nve0_graph_load_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;

        nv_wr32(dev, 0x409840, 0x00000030);
        nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
        nv_wr32(dev, 0x409504, 0x00000003);
        if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
                NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

        return 0;
}
static int
nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
        nv_wr32(dev, 0x409840, 0x00000003);
        nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
        nv_wr32(dev, 0x409504, 0x00000009);
        if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
                NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
                return -EBUSY;
        }

        return 0;
}
static int
nve0_graph_construct_context(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
        struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
        struct drm_device *dev = chan->dev;
        int ret, i;
        u32 *ctx;

        ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        nve0_graph_load_context(chan);

        /* context header setup; the exact meaning of these fields is
         * not understood */
        nv_wo32(grch->grctx, 0x1c, 1);
        nv_wo32(grch->grctx, 0x20, 0);
        nv_wo32(grch->grctx, 0x28, 0);
        nv_wo32(grch->grctx, 0x2c, 0);
        dev_priv->engine.instmem.flush(dev);

        ret = nve0_grctx_generate(chan);
        if (ret)
                goto err;

        ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
        if (ret)
                goto err;

        for (i = 0; i < priv->grctx_size; i += 4)
                ctx[i / 4] = nv_ro32(grch->grctx, i);

        priv->grctx_vals = ctx;
        return 0;

err:
        kfree(ctx);
        return ret;
}
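
/* Allocate the per-channel buffers PGRAPH expects and record a list of
 * register/value pairs (the "mmio list") that the context-switch
 * microcode writes back whenever this channel's context is loaded.
 * The unk* names mark buffers whose purpose is only partially
 * understood.
 */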
static int
nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
        struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
        struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
        struct drm_device *dev = chan->dev;
        u32 magic[GPC_MAX][2];
        u16 offset = 0x0000;
        int gpc;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
                                 &grch->unk408004);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
                                 &grch->unk40800c);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
                                 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
                                 &grch->unk418810);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
                                 &grch->mmio);
        if (ret)
                return ret;

#define mmio(r,v) do {                                                 \
        nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r));             \
        nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v));             \
        grch->mmio_nr++;                                               \
} while (0)
        mmio(0x40800c, grch->unk40800c->linst >> 8);
        mmio(0x408010, 0x80000000);
        mmio(0x419004, grch->unk40800c->linst >> 8);
        mmio(0x419008, 0x00000000);
        mmio(0x4064cc, 0x80000000);
        mmio(0x408004, grch->unk408004->linst >> 8);
        mmio(0x408008, 0x80000030);
        mmio(0x418808, grch->unk408004->linst >> 8);
        mmio(0x41880c, 0x80000030);
        mmio(0x4064c8, 0x01800600);
        mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
        mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
        mmio(0x405830, 0x02180648);
        mmio(0x4064c4, 0x0192ffff);

        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
                u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
                magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
                magic[gpc][1] = 0x00000000 | (magic1 << 16);
                offset += 0x0324 * priv->tpc_nr[gpc];
        }

        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
                mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
                offset += 0x07ff * priv->tpc_nr[gpc];
        }

        mmio(0x17e91c, 0x06060609);
        mmio(0x17e920, 0x00090a05);
#undef mmio
        return 0;
}
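
/* Per-channel context creation: allocate the context object, point the
 * channel's instance block at it, copy in the golden image (building
 * it first if this is the earliest channel), then patch the header to
 * reference this channel's mmio list.
 */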
static int
nve0_graph_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct nve0_graph_priv *priv = nv_engine(dev, engine);
        struct nve0_graph_chan *grch;
        struct nouveau_gpuobj *grctx;
        int ret, i;

        grch = kzalloc(sizeof(*grch), GFP_KERNEL);
        if (!grch)
                return -ENOMEM;
        chan->engctx[NVOBJ_ENGINE_GR] = grch;

        ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
                                 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
                                 &grch->grctx);
        if (ret)
                goto error;
        grctx = grch->grctx;

        ret = nve0_graph_create_context_mmio_list(chan);
        if (ret)
                goto error;

        nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
        nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
        pinstmem->flush(dev);

        /* the golden context is generated on the first channel only */
        if (!priv->grctx_vals) {
                ret = nve0_graph_construct_context(chan);
                if (ret)
                        goto error;
        }

        for (i = 0; i < priv->grctx_size; i += 4)
                nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
        nv_wo32(grctx, 0xf4, 0);
        nv_wo32(grctx, 0xf8, 0);
        nv_wo32(grctx, 0x10, grch->mmio_nr);
        nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
        nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
        nv_wo32(grctx, 0x1c, 1);
        nv_wo32(grctx, 0x20, 0);
        nv_wo32(grctx, 0x28, 0);
        nv_wo32(grctx, 0x2c, 0);

        pinstmem->flush(dev);
        return 0;

error:
        priv->base.context_del(chan, engine);
        return ret;
}
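
/* Drop the channel's buffer references; nouveau_gpuobj_ref(NULL, ...)
 * releases and clears each pointer.
 */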
static void
nve0_graph_context_del(struct nouveau_channel *chan, int engine)
{
        struct nve0_graph_chan *grch = chan->engctx[engine];

        nouveau_gpuobj_ref(NULL, &grch->mmio);
        nouveau_gpuobj_ref(NULL, &grch->unk418810);
        nouveau_gpuobj_ref(NULL, &grch->unk40800c);
        nouveau_gpuobj_ref(NULL, &grch->unk408004);
        nouveau_gpuobj_ref(NULL, &grch->grctx);
        chan->engctx[engine] = NULL;
}
static int
nve0_graph_object_new(struct nouveau_channel *chan, int engine,
                      u32 handle, u16 class)
{
        return 0;
}
static int
nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
        return 0;
}
static void
nve0_graph_init_obj418880(struct drm_device *dev)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        int i;

        nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
        nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
        for (i = 0; i < 4; i++)
                nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
        nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
        nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}
static void
nve0_graph_init_regs(struct drm_device *dev)
{
        nv_wr32(dev, 0x400080, 0x003083c2);
        nv_wr32(dev, 0x400088, 0x0001ffe7);
        nv_wr32(dev, 0x40008c, 0x00000000);
        nv_wr32(dev, 0x400090, 0x00000030);
        nv_wr32(dev, 0x40013c, 0x003901f7);
        nv_wr32(dev, 0x400140, 0x00000100);
        nv_wr32(dev, 0x400144, 0x00000000);
        nv_wr32(dev, 0x400148, 0x00000110);
        nv_wr32(dev, 0x400138, 0x00000000);
        nv_wr32(dev, 0x400130, 0x00000000);
        nv_wr32(dev, 0x400134, 0x00000000);
        nv_wr32(dev, 0x400124, 0x00000002);
}
static void
nve0_graph_init_units(struct drm_device *dev)
{
        nv_wr32(dev, 0x409ffc, 0x00000000);
        nv_wr32(dev, 0x409c14, 0x00003e3e);
        nv_wr32(dev, 0x409c24, 0x000f0000);

        nv_wr32(dev, 0x404000, 0xc0000000);
        nv_wr32(dev, 0x404600, 0xc0000000);
        nv_wr32(dev, 0x408030, 0xc0000000);
        nv_wr32(dev, 0x404490, 0xc0000000);
        nv_wr32(dev, 0x406018, 0xc0000000);
        nv_wr32(dev, 0x407020, 0xc0000000);
        nv_wr32(dev, 0x405840, 0xc0000000);
        nv_wr32(dev, 0x405844, 0x00ffffff);

        nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
        nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}
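
/* Assign TPCs to GPCs round-robin and broadcast the resulting table
 * (4 bits per TPC) to all GPCs.  magicgpc918 is 2^23 / tpc_total,
 * rounded up, presumably a fixed-point per-TPC fraction used for work
 * distribution.
 */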
static void
nve0_graph_init_gpc_0(struct drm_device *dev)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
        u32 data[TPC_MAX / 8];
        u8  tpcnr[GPC_MAX];
        int i, gpc, tpc;

        nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);

        memset(data, 0x00, sizeof(data));
        memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
        for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
                do {
                        gpc = (gpc + 1) % priv->gpc_nr;
                } while (!tpcnr[gpc]);
                tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

                data[i / 8] |= tpc << ((i % 8) * 4);
        }

        nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
        nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
        nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
        nv_wr32(dev, GPC_BCAST(0x098c), data[3]);

        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
                                                    priv->tpc_nr[gpc]);
                nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
                nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
        }

        nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
        nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
}
static void
nve0_graph_init_gpc_1(struct drm_device *dev)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        int gpc, tpc;

        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
                nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
                nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
                nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
                nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
                for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
                        nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
                }
                nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
                nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
        }
}
static void
nve0_graph_init_rop(struct drm_device *dev)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        int rop;

        for (rop = 0; rop < priv->rop_nr; rop++) {
                nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
                nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
                nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
                nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
        }
}
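
/* Upload a falcon microcode pair.  The data segment streams through
 * fuc_base + 0x01c4; the code segment streams through + 0x0184, with
 * + 0x0188 selecting the destination 256-byte page every 0x40 words.
 */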
static void
nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
                    struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
{
        int i;

        nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
        for (i = 0; i < data->size / 4; i++)
                nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);

        nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
        for (i = 0; i < code->size / 4; i++) {
                if ((i & 0x3f) == 0)
                        nv_wr32(dev, fuc_base + 0x0188, i >> 6);
                nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
        }
}
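
/* Boot the HUB (0x409000) and GPC (0x41a000) context-switch falcons
 * and run the initial handshake.  Request 0x10 answers with the size
 * of the context image; the remaining requests (0x16, 0x25, 0x30-0x32)
 * and their magic arguments are not understood, only known to be
 * required.
 */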
static int
nve0_graph_init_ctxctl(struct drm_device *dev)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        u32 r000260;

        /* load fuc microcode */
        r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
        nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
        nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
        nv_wr32(dev, 0x000260, r000260);

        /* start both of them running */
        nv_wr32(dev, 0x409840, 0xffffffff);
        nv_wr32(dev, 0x41a10c, 0x00000000);
        nv_wr32(dev, 0x40910c, 0x00000000);
        nv_wr32(dev, 0x41a100, 0x00000002);
        nv_wr32(dev, 0x409100, 0x00000002);
        if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
                NV_INFO(dev, "0x409800 wait failed\n");

        nv_wr32(dev, 0x409840, 0xffffffff);
        nv_wr32(dev, 0x409500, 0x7fffffff);
        nv_wr32(dev, 0x409504, 0x00000021);

        nv_wr32(dev, 0x409840, 0xffffffff);
        nv_wr32(dev, 0x409500, 0x00000000);
        nv_wr32(dev, 0x409504, 0x00000010);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
                return -EBUSY;
        }
        priv->grctx_size = nv_rd32(dev, 0x409800);

        nv_wr32(dev, 0x409840, 0xffffffff);
        nv_wr32(dev, 0x409500, 0x00000000);
        nv_wr32(dev, 0x409504, 0x00000016);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
                return -EBUSY;
        }

        nv_wr32(dev, 0x409840, 0xffffffff);
        nv_wr32(dev, 0x409500, 0x00000000);
        nv_wr32(dev, 0x409504, 0x00000025);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
                return -EBUSY;
        }

        nv_wr32(dev, 0x409800, 0x00000000);
        nv_wr32(dev, 0x409500, 0x00000001);
        nv_wr32(dev, 0x409504, 0x00000030);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
                return -EBUSY;
        }

        nv_wr32(dev, 0x409810, 0xb00095c8);
        nv_wr32(dev, 0x409800, 0x00000000);
        nv_wr32(dev, 0x409500, 0x00000001);
        nv_wr32(dev, 0x409504, 0x00000031);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
                return -EBUSY;
        }

        nv_wr32(dev, 0x409810, 0x00080420);
        nv_wr32(dev, 0x409800, 0x00000000);
        nv_wr32(dev, 0x409500, 0x00000001);
        nv_wr32(dev, 0x409504, 0x00000032);
        if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
                return -EBUSY;
        }

        nv_wr32(dev, 0x409614, 0x00000070);
        nv_wr32(dev, 0x409614, 0x00000770);
        nv_wr32(dev, 0x40802c, 0x00000001);
        return 0;
}
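
/* Master PGRAPH initialisation: pulse the PGRAPH-related enable bits
 * in 0x000200 to reset the engine, program global state, clear pending
 * interrupts and traps, then boot the context-switching microcode.
 */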
static int
nve0_graph_init(struct drm_device *dev, int engine)
{
        int ret;

        nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
        nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

        nve0_graph_init_obj418880(dev);
        nve0_graph_init_regs(dev);
        nve0_graph_init_gpc_0(dev);

        nv_wr32(dev, 0x400500, 0x00010001);
        nv_wr32(dev, 0x400100, 0xffffffff);
        nv_wr32(dev, 0x40013c, 0xffffffff);

        nve0_graph_init_units(dev);
        nve0_graph_init_gpc_1(dev);
        nve0_graph_init_rop(dev);

        nv_wr32(dev, 0x400108, 0xffffffff);
        nv_wr32(dev, 0x400138, 0xffffffff);
        nv_wr32(dev, 0x400118, 0xffffffff);
        nv_wr32(dev, 0x400130, 0xffffffff);
        nv_wr32(dev, 0x40011c, 0xffffffff);
        nv_wr32(dev, 0x400134, 0xffffffff);
        nv_wr32(dev, 0x400054, 0x34ce3464);

        ret = nve0_graph_init_ctxctl(dev);
        if (ret)
                return ret;

        return 0;
}
/* map an instance-block address back to a channel id by scanning the
 * channel table; returns pfifo->channels if no match is found */
static int
nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (i = 0; i < pfifo->channels; i++) {
                chan = dev_priv->channels.ptr[i];
                if (!chan || !chan->ramin)
                        continue;

                if (inst == chan->ramin->vinst)
                        break;
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return i;
}
static void
nve0_graph_ctxctl_isr(struct drm_device *dev)
{
        u32 ustat = nv_rd32(dev, 0x409c18);

        if (ustat & 0x00000001)
                NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
        if (ustat & 0x00080000)
                NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
        if (ustat & ~0x00080001)
                NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);

        nve0_graph_ctxctl_debug(dev);
        nv_wr32(dev, 0x409c20, ustat);
}
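
/* Decode and acknowledge PGRAPH traps (0x400108).  Only the DISPATCH,
 * SHADER and ROP sources get any decoding; whatever remains is
 * reported raw and acked wholesale.
 */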
static void
nve0_graph_trap_isr(struct drm_device *dev, int chid)
{
        struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
        u32 trap = nv_rd32(dev, 0x400108);
        int rop;

        if (trap & 0x00000001) {
                u32 stat = nv_rd32(dev, 0x404000);
                NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
                nv_wr32(dev, 0x404000, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x00000001);
                trap &= ~0x00000001;
        }

        if (trap & 0x00000010) {
                u32 stat = nv_rd32(dev, 0x405840);
                NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
                nv_wr32(dev, 0x405840, 0xc0000000);
                nv_wr32(dev, 0x400108, 0x00000010);
                trap &= ~0x00000010;
        }

        if (trap & 0x02000000) {
                for (rop = 0; rop < priv->rop_nr; rop++) {
                        u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
                        u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
                        NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
                                     rop, chid, statz, statc);
                        nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
                        nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
                }
                nv_wr32(dev, 0x400108, 0x02000000);
                trap &= ~0x02000000;
        }

        if (trap) {
                NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
                nv_wr32(dev, 0x400108, trap);
        }
}
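
/* Top-level PGRAPH interrupt handler.  0x400704/0x400708 latch the
 * offending method address and data, and 0x404200 + subc * 4 holds
 * the class currently bound to that subchannel.
 */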
static void
nve0_graph_isr(struct drm_device *dev)
{
        u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
        u32 chid = nve0_graph_isr_chid(dev, inst);
        u32 stat = nv_rd32(dev, 0x400100);
        u32 addr = nv_rd32(dev, 0x400704);
        u32 mthd = (addr & 0x00003ffc);
        u32 subc = (addr & 0x00070000) >> 16;
        u32 data = nv_rd32(dev, 0x400708);
        u32 code = nv_rd32(dev, 0x400110);
        u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

        if (stat & 0x00000010) {
                if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
                        NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
                                     "subc %d class 0x%04x mthd 0x%04x "
                                     "data 0x%08x\n",
                                chid, inst, subc, class, mthd, data);
                }
                nv_wr32(dev, 0x400100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000020) {
                NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
                             "class 0x%04x mthd 0x%04x data 0x%08x\n",
                        chid, inst, subc, class, mthd, data);
                nv_wr32(dev, 0x400100, 0x00000020);
                stat &= ~0x00000020;
        }

        if (stat & 0x00100000) {
                NV_INFO(dev, "PGRAPH: DATA_ERROR [");
                nouveau_enum_print(nv50_data_error_names, code);
                printk("] ch %d [0x%010llx] subc %d class 0x%04x "
                       "mthd 0x%04x data 0x%08x\n",
                       chid, inst, subc, class, mthd, data);
                nv_wr32(dev, 0x400100, 0x00100000);
                stat &= ~0x00100000;
        }

        if (stat & 0x00200000) {
                nve0_graph_trap_isr(dev, chid);
                nv_wr32(dev, 0x400100, 0x00200000);
                stat &= ~0x00200000;
        }

        if (stat & 0x00080000) {
                nve0_graph_ctxctl_isr(dev);
                nv_wr32(dev, 0x400100, 0x00080000);
                stat &= ~0x00080000;
        }

        if (stat) {
                NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
                nv_wr32(dev, 0x400100, stat);
        }

        nv_wr32(dev, 0x400500, 0x00010001);
}
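
/* Fetch one ucode segment via the firmware loader.  Kepler support in
 * this driver depends entirely on these external "nouveau/nv%02x_*"
 * files; there is no built-in microcode fallback.
 */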
static int
nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
                     struct nve0_graph_fuc *fuc)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        const struct firmware *fw;
        char f[32];
        int ret;

        snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
        ret = request_firmware(&fw, f, &dev->pdev->dev);
        if (ret)
                return ret;

        fuc->size = fw->size;
        fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
        release_firmware(fw);
        return (fuc->data != NULL) ? 0 : -ENOMEM;
}
static void
nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
{
        if (fuc->data) {
                kfree(fuc->data);
                fuc->data = NULL;
        }
}
static void
nve0_graph_destroy(struct drm_device *dev, int engine)
{
        struct nve0_graph_priv *priv = nv_engine(dev, engine);

        nve0_graph_destroy_fw(&priv->fuc409c);
        nve0_graph_destroy_fw(&priv->fuc409d);
        nve0_graph_destroy_fw(&priv->fuc41ac);
        nve0_graph_destroy_fw(&priv->fuc41ad);

        nouveau_irq_unregister(dev, 12);

        nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
        nouveau_gpuobj_ref(NULL, &priv->unk4188b4);

        if (priv->grctx_vals)
                kfree(priv->grctx_vals);

        NVOBJ_ENGINE_DEL(dev, GR);
        kfree(priv);
}
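
/* Engine constructor: register the GR engine hooks and IRQ, load the
 * external microcode, probe the GPC/TPC/ROP topology, and register the
 * Kepler object classes.
 */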
int
nve0_graph_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nve0_graph_priv *priv;
        int ret, gpc, i;
        u32 kepler;

        kepler = nve0_graph_class(dev);
        if (!kepler) {
                NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
                return 0;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.destroy = nve0_graph_destroy;
        priv->base.init = nve0_graph_init;
        priv->base.fini = nve0_graph_fini;
        priv->base.context_new = nve0_graph_context_new;
        priv->base.context_del = nve0_graph_context_del;
        priv->base.object_new = nve0_graph_object_new;

        NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
        nouveau_irq_register(dev, 12, nve0_graph_isr);

        NV_INFO(dev, "PGRAPH: using external firmware\n");
        if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
            nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
            nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
            nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
                ret = 0;
                goto error;
        }

        ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
        if (ret)
                goto error;

        ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
        if (ret)
                goto error;

        for (i = 0; i < 0x1000; i += 4) {
                nv_wo32(priv->unk4188b4, i, 0x00000010);
                nv_wo32(priv->unk4188b8, i, 0x00000010);
        }

        priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
        priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
                priv->tpc_total += priv->tpc_nr[gpc];
        }

        switch (dev_priv->chipset) {
        case 0xe4:
                if (priv->tpc_total == 8)
                        priv->magic_not_rop_nr = 3;
                else
                if (priv->tpc_total == 7)
                        priv->magic_not_rop_nr = 1;
                break;
        case 0xe7:
                priv->magic_not_rop_nr = 1;
                break;
        default:
                break;
        }

        if (!priv->magic_not_rop_nr) {
                NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
                         priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
                         priv->tpc_nr[3], priv->rop_nr);
                priv->magic_not_rop_nr = 0x00;
        }

        NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
        NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
        NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
        NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
        NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
        return 0;

error:
        nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
        return ret;
}