Merge pull request #5 from polachok/new
[pscnv.git] / pscnv / nvc0_graph.c
blob1c504a3c736a79e3ebacd4571835c05bef977c7a
1 /*
2 * Copyright 2010 Red Hat Inc.
3 * Copyright (C) 2010 Christoph Bumiller.
4 * All Rights Reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "drm.h"
28 #include "nouveau_drv.h"
29 #include "nouveau_reg.h"
30 #include "pscnv_engine.h"
31 #include "pscnv_chan.h"
32 #include "nvc0_vm.h"
33 #include "nvc0_graph.h"
34 #include "nvc0_pgraph.xml.h"
36 * If you want to use NVIDIA's firmware microcode, activate the macro:
37 * #define USE_BLOB_UCODE
39 #ifdef USE_BLOB_UCODE
40 #include "nvc0_ctxctl.h"
41 #else
42 #include "nvc0_grhub.fuc.h"
43 #include "nvc0_grgpc.fuc.h"
44 #endif
/* Per-channel PGRAPH state: the graphics context buffer and its mapping
 * inside the channel's virtual address space. */
struct nvc0_graph_chan {
	struct pscnv_bo *grctx;         /* backing storage for the grctx */
	struct pscnv_mm_node *grctx_vm; /* grctx mapping in the channel vspace */
};
/* Shorthand for per-GPC, per-TP and per-ROPC register windows, and for the
 * CTXCTL register groups (hub copy and GPC broadcast copy). */
#define GPC_REG(i, r)   (NVC0_PGRAPH_GPC(i) + (r))
#define TP_REG(i, j, r) (NVC0_PGRAPH_GPC_TP(i, j) + (r))
#define GPC_BC(n)       NVC0_PGRAPH_GPC_BROADCAST_##n
#define CTXCTL(n)       NVC0_PGRAPH_CTXCTL_##n
#define BC_CTXCTL(n)    NVC0_PGRAPH_GPC_BROADCAST_CTXCTL_##n
#define GPC_CTXCTL(n)   NVC0_PGRAPH_GPC_CTXCTL_##n
#define ROPC_REG(i, r)  (NVC0_PGRAPH_ROPC(i) + (r))

/* Value written to the unit trap registers: acknowledge pending traps and
 * keep trapping enabled. */
#define __TRAP_CLEAR_AND_ENABLE \
	(NVC0_PGRAPH_DISPATCH_TRAP_CLEAR | NVC0_PGRAPH_DISPATCH_TRAP_ENABLE)
/* Engine vtable entry points and the PGRAPH interrupt handler. */
void nvc0_graph_takedown(struct pscnv_engine *eng);
int nvc0_graph_chan_alloc(struct pscnv_engine *eng, struct pscnv_chan *ch);
void nvc0_graph_chan_free(struct pscnv_engine *eng, struct pscnv_chan *ch);
void nvc0_graph_chan_kill(struct pscnv_engine *eng, struct pscnv_chan *ch);
void nvc0_graph_irq_handler(struct drm_device *dev, int irq);
void nvc0_ctxctl_load_fuc(struct drm_device *dev);
/* Pulse the PGRAPH enable bit (bit 12) in PMC_ENABLE (0x200) to reset the
 * engine: clear it, then set it again. */
static inline void
nvc0_graph_init_reset(struct drm_device *dev)
{
	const uint32_t pmc = nv_rd32(dev, 0x200);

	nv_wr32(dev, 0x200, pmc & ~0x00001000);
	nv_wr32(dev, 0x200, nv_rd32(dev, 0x200) | 0x00001000);
}
75 static void
76 nvc0_graph_init_intr(struct drm_device *dev)
78 nv_wr32(dev, NVC0_PGRAPH_TRAP, 0xffffffff);
79 nv_wr32(dev, NVC0_PGRAPH_TRAP_EN, 0xffffffff);
81 nv_wr32(dev, NVC0_PGRAPH_TRAP_GPCS, 0xffffffff);
82 nv_wr32(dev, NVC0_PGRAPH_TRAP_GPCS_EN, 0xffffffff);
83 nv_wr32(dev, NVC0_PGRAPH_TRAP_ROPCS, 0xffffffff);
84 nv_wr32(dev, NVC0_PGRAPH_TRAP_ROPCS_EN, 0xffffffff);
86 nv_wr32(dev, 0x400054, 0x34ce3464);
89 static void
90 nvc0_graph_init_units(struct drm_device *dev)
92 nv_wr32(dev, CTXCTL(INTR_UP_ENABLE), 0xf0000);
94 nv_wr32(dev, NVC0_PGRAPH_DISPATCH_TRAP, 0xc0000000);
95 nv_wr32(dev, NVC0_PGRAPH_M2MF_TRAP, 0xc0000000);
96 nv_wr32(dev, NVC0_PGRAPH_CCACHE_TRAP, 0xc0000000);
97 nv_wr32(dev, NVC0_PGRAPH_UNK6000_TRAP_UNK1, 0xc0000000);
98 nv_wr32(dev, NVC0_PGRAPH_MACRO_TRAP, 0xc0000000);
99 nv_wr32(dev, NVC0_PGRAPH_UNK6000_TRAP_UNK0, 0xc0000000);
100 nv_wr32(dev, NVC0_PGRAPH_UNK5800_TRAP, 0xc0000000);
102 nv_wr32(dev, NVC0_PGRAPH_UNK5800_TRAP_UNK44, 0x00ffffff);
104 nv_mask(dev, GPC_BC(TP_BROADCAST_L1) + 0xc0, 0, 8);
105 nv_mask(dev, GPC_BC(TP_BROADCAST_MP) + 0xb4, 0, 0x1000);
108 static void
109 nvc0_graph_init_gpc(struct drm_device *dev, struct nvc0_graph_engine *graph)
111 uint32_t magicgpc918;
112 uint32_t data[NVC0_TP_MAX / 8];
113 uint8_t gpc_tp_count[NVC0_GPC_MAX];
114 int i, gpc, tp;
116 for (gpc = 0; gpc < graph->gpc_count; gpc++) {
117 /* the number of TPs per GPC. */
118 graph->gpc_tp_count[gpc] = nv_rd32(dev, GPC_REG(gpc, 0x2608)) & 0xffff;
119 /* the number of total TPs. */
120 graph->tp_count += graph->gpc_tp_count[gpc];
123 magicgpc918 = (0x00800000 + (graph->tp_count - 1)) / graph->tp_count;
126 * TP ROP UNKVAL(magic_val)
127 * 450: 4/0/0/0 2 3
128 * 460: 3/4/0/0 4 1
129 * 465: 3/4/4/0 4 7
130 * 470: 3/3/4/4 5 5
131 * 480: 3/4/4/4 6 6
133 memset(data, 0x00, sizeof(data));
134 memcpy(gpc_tp_count, graph->gpc_tp_count, sizeof(graph->gpc_tp_count));
135 for (i = 0, gpc = -1; i < graph->tp_count; i++) {
136 do {
137 gpc = (gpc + 1) % graph->gpc_count;
138 } while (!gpc_tp_count[gpc]);
139 tp = graph->gpc_tp_count[gpc] - gpc_tp_count[gpc]--;
141 data[i / 8] |= tp << ((i % 8) * 4);
144 /* some unknown broadcast areas. */
145 nv_wr32(dev, 0x418980, data[0]);
146 nv_wr32(dev, 0x418984, data[1]);
147 nv_wr32(dev, 0x418988, data[2]);
148 nv_wr32(dev, 0x41898c, data[3]);
150 for (gpc = 0; gpc < graph->gpc_count; gpc++) {
151 nv_wr32(dev, GPC_REG(gpc, 0x0914),
152 (graph->ropc_count << 8) | graph->gpc_tp_count[gpc]);
153 nv_wr32(dev, GPC_REG(gpc, 0x0910),
154 (graph->gpc_count << 16) | graph->tp_count);
155 nv_wr32(dev, GPC_REG(gpc, 0x0918), magicgpc918);
158 /* some unknown broadcast areas. */
159 nv_wr32(dev, 0x419bd4, magicgpc918);
160 nv_wr32(dev, 0x4188ac, graph->ropc_count);
162 for (gpc = 0; gpc < graph->gpc_count; gpc++) {
163 nv_wr32(dev, GPC_REG(gpc, 0x0420), 0xc0000000);
164 nv_wr32(dev, GPC_REG(gpc, 0x0900), 0xc0000000);
165 nv_wr32(dev, GPC_REG(gpc, 0x1028), 0xc0000000);
166 nv_wr32(dev, GPC_REG(gpc, 0x0824), 0xc0000000);
167 for (tp = 0; tp < graph->gpc_tp_count[gpc]; tp++) {
168 nv_wr32(dev, TP_REG(gpc, tp, 0x508), 0xffffffff);
169 nv_wr32(dev, TP_REG(gpc, tp, 0x50c), 0xffffffff);
170 nv_wr32(dev, TP_REG(gpc, tp, 0x224), 0xc0000000);
171 nv_wr32(dev, TP_REG(gpc, tp, 0x48c), 0xc0000000);
172 nv_wr32(dev, TP_REG(gpc, tp, 0x084), 0xc0000000);
173 nv_wr32(dev, TP_REG(gpc, tp, 0x644), 0x1ffffe);
174 nv_wr32(dev, TP_REG(gpc, tp, 0x64c), 0xf);
176 nv_wr32(dev, GPC_REG(gpc, 0x2c90), 0xffffffff); /* CTXCTL */
177 nv_wr32(dev, GPC_REG(gpc, 0x2c94), 0xffffffff); /* CTXCTL */
181 static void
182 nvc0_graph_init_ropc(struct drm_device *dev, struct nvc0_graph_engine *graph)
184 int i;
186 for (i = 0; i < graph->ropc_count; ++i) {
187 nv_wr32(dev, ROPC_REG(i, 0x144), 0xc0000000);
188 nv_wr32(dev, ROPC_REG(i, 0x070), 0xc0000000);
189 nv_wr32(dev, NVC0_PGRAPH_ROPC_TRAP(i), 0xffffffff);
190 nv_wr32(dev, NVC0_PGRAPH_ROPC_TRAP_EN(i), 0xffffffff);
194 static void
195 nvc0_graph_init_regs(struct drm_device *dev)
197 nv_wr32(dev, 0x400080, 0x003083c2);
198 nv_wr32(dev, 0x400088, 0x00006fe7);
199 nv_wr32(dev, 0x40008c, 0x00000000);
200 nv_wr32(dev, 0x400090, 0x00000030);
202 nv_wr32(dev, NVC0_PGRAPH_INTR_EN, 0x013901f7);
203 nv_wr32(dev, NVC0_PGRAPH_INTR_DISPATCH_CTXCTL_DOWN, 0x00000100);
204 nv_wr32(dev, NVC0_PGRAPH_INTR_CTXCTL_DOWN, 0x00000000);
205 nv_wr32(dev, NVC0_PGRAPH_INTR_EN_CTXCTL_DOWN, 0x00000110);
206 nv_wr32(dev, NVC0_PGRAPH_TRAP_EN, 0x00000000);
207 nv_wr32(dev, NVC0_PGRAPH_TRAP_GPCS_EN, 0x00000000);
208 nv_wr32(dev, NVC0_PGRAPH_TRAP_ROPCS_EN, 0x00000000);
209 nv_wr32(dev, 0x400124, 0x00000002);
211 nv_wr32(dev, 0x4188ac, 0x00000005);
214 #ifdef USE_BLOB_UCODE
215 static int
216 nvc0_graph_start_microcode(struct drm_device *dev,
217 struct nvc0_graph_engine *graph)
219 int i, j, cx_num;
221 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0xffffffff);
222 nv_wr32(dev, 0x41a10c, 0);
223 nv_wr32(dev, 0x40910c, 0);
224 nv_wr32(dev, BC_CTXCTL(UC_CTRL), BC_CTXCTL(UC_CTRL_START_TRIGGER));
225 nv_wr32(dev, CTXCTL(UC_CTRL), CTXCTL(UC_CTRL_START_TRIGGER));
227 if (!nv_wait(dev, CTXCTL(CC_SCRATCH(0)), 0x1, 0x1)) {
228 NV_ERROR(dev, "PGRAPH: HUB_INIT/GPC_INIT timed out\n");
229 return -EBUSY;
232 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0xffffffff);
233 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0x7fffffff);
234 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x21);
236 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0xffffffff);
237 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0);
238 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x10); /* grctx size request */
239 if (!nv_wait_neq(dev, CTXCTL(CC_SCRATCH(0)), ~0, 0x0)) {
240 NV_ERROR(dev, "PGRAPH: GRCTX_SIZE timed out\n");
241 return -EBUSY;
244 graph->grctx_size = nv_rd32(dev, CTXCTL(CC_SCRATCH(0)));
245 graph->grctx_size = (graph->grctx_size + 0xffff) & ~0xffff;
247 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0xffffffff);
248 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0);
249 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x16);
250 if (!nv_wait_neq(dev, CTXCTL(CC_SCRATCH(0)), ~0, 0x0)) {
251 NV_ERROR(dev, "PGRAPH: CMD 0x16 timed out\n");
252 return -EBUSY;
255 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0xffffffff);
256 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0);
257 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x25);
258 if (!nv_wait_neq(dev, CTXCTL(CC_SCRATCH(0)), ~0, 0x0)) {
259 NV_ERROR(dev, "PGRAPH: CMD 0x25 timed out\n");
260 return -EBUSY;
263 cx_num = nv_rd32(dev, CTXCTL(STRANDS));
264 for (i = 0; i < cx_num; ++i) {
265 nv_wr32(dev, CTXCTL(HOST_IO_INDEX), i);
266 nv_rd32(dev, CTXCTL(STRAND_SIZE));
269 cx_num = nv_rd32(dev, GPC_REG(0, 0x2880));
270 for (i = 0; i < graph->gpc_count; ++i) {
271 for (j = 0; j < cx_num; ++j) {
272 nv_wr32(dev, GPC_CTXCTL(HOST_IO_INDEX(i)), j);
273 nv_rd32(dev, GPC_CTXCTL(STRAND_SIZE(i)));
277 return 0;
280 static int
281 nvc0_graph_load_ctx(struct drm_device *dev, struct pscnv_bo *vo)
283 uint32_t inst = vo->start >> 12;
285 nv_wr32(dev, CTXCTL(RED_SWITCH), 0x070);
286 nv_wr32(dev, CTXCTL(RED_SWITCH), 0x770);
287 nv_wr32(dev, 0x40802c, 1); /* ??? */
288 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0x30);
290 nv_wr32(dev, CTXCTL(WRCMD_DATA), (0x8 << 28) | inst);
291 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x3);
293 return 0;
297 nvc0_graph_store_ctx(struct drm_device *dev)
299 uint32_t inst = nv_rd32(dev, 0x409b00) & 0xfffffff;
301 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0x3);
302 nv_wr32(dev, CTXCTL(WRCMD_DATA), (0x8 << 28) | inst);
303 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x9);
305 if (!nv_wait(dev, CTXCTL(CC_SCRATCH(0)), ~0, 0x1)) {
306 NV_ERROR(dev, "PGRAPH: failed to store context\n");
307 return -EBUSY;
309 NV_INFO(dev, "PGRAPH: context stored: 0x%08x\n",
310 nv_rd32(dev, CTXCTL(CC_SCRATCH(0))));
312 return 0;
315 #else
316 static void
317 nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
319 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
320 nv_rd32(dev, base + 0x400));
321 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
322 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
323 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
324 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
325 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
326 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
329 static void
330 nvc0_graph_ctxctl_debug(struct drm_device *dev)
332 u32 gpcnr = nv_rd32(dev, CTXCTL(UNITS)) & 0xffff;
333 u32 gpc;
335 nvc0_graph_ctxctl_debug_unit(dev, CTXCTL(INTR_TRIGGER));
336 for (gpc = 0; gpc < gpcnr; gpc++)
337 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
340 static int
341 nvc0_graph_start_microcode(struct drm_device *dev,
342 struct nvc0_graph_engine *graph)
344 struct drm_nouveau_private *dev_priv = dev->dev_private;
346 /* start HUB ucode running, it'll init the GPCs */
347 nv_wr32(dev, CTXCTL(CC_SCRATCH(0)), dev_priv->chipset);
348 nv_wr32(dev, 0x40910c, 0x00000000);
349 nv_wr32(dev, CTXCTL(UC_CTRL), CTXCTL(UC_CTRL_START_TRIGGER));
351 if (!nv_wait(dev, CTXCTL(CC_SCRATCH(0)), 0x80000000, 0x80000000)) {
352 NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
353 nvc0_graph_ctxctl_debug(dev);
354 return -EBUSY;
356 graph->grctx_size = nv_rd32(dev, CTXCTL(CC_SCRATCH(1)));
358 return 0;
360 #endif
362 static void
363 nvc0_graph_load_microcode(struct drm_device *dev)
365 int i;
366 const uint32_t val260 = nv_rd32(dev, 0x260);
368 nv_wr32(dev, 0x260, val260 & ~1);
370 /* load HUB microcode. */
371 nv_wr32(dev, CTXCTL(DATA_INDEX(0)), CTXCTL(DATA_INDEX_WRITE_AUTOINCR));
372 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
373 nv_wr32(dev, CTXCTL(DATA(0)), ((uint32_t *)nvc0_grhub_data)[i]);
375 nv_wr32(dev, CTXCTL(CODE_INDEX), CTXCTL(CODE_INDEX_WRITE_AUTOINCR));
376 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
377 if ((i & 0x3f) == 0)
378 nv_wr32(dev, CTXCTL(CODE_VIRT_ADDR), i >> 6);
379 nv_wr32(dev, CTXCTL(CODE), ((uint32_t *)nvc0_grhub_code)[i]);
382 /* load GPC microcode. */
383 nv_wr32(dev, BC_CTXCTL(DATA_INDEX(0)), BC_CTXCTL(DATA_INDEX_WRITE_AUTOINCR));
384 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
385 nv_wr32(dev, BC_CTXCTL(DATA(0)), ((uint32_t *)nvc0_grgpc_data)[i]);
387 nv_wr32(dev, BC_CTXCTL(CODE_INDEX), BC_CTXCTL(CODE_INDEX_WRITE_AUTOINCR));
388 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
389 if ((i & 0x3f) == 0)
390 nv_wr32(dev, BC_CTXCTL(CODE_VIRT_ADDR), i >> 6);
391 nv_wr32(dev, BC_CTXCTL(CODE), ((uint32_t *)nvc0_grgpc_code)[i]);
394 nv_wr32(dev, 0x260, val260);
/* Load the CTXCTL microcode and start it.
 * Returns 0 on success or a negative errno from the start step.
 *
 * BUG FIX: the original discarded nvc0_graph_start_microcode()'s return
 * value, so a -EBUSY microcode-start timeout was reported as success. */
static int
nvc0_graph_init_ctxctl(struct drm_device *dev, struct nvc0_graph_engine *graph)
{
	nvc0_graph_load_microcode(dev);
	return nvc0_graph_start_microcode(dev, graph);
}
406 static int
407 nvc0_graph_generate_context(struct drm_device *dev,
408 struct nvc0_graph_engine *graph,
409 struct pscnv_chan *chan)
411 #ifdef USE_BLOB_UCODE
412 struct drm_nouveau_private *dev_priv = dev->dev_private;
413 #endif
414 struct nvc0_graph_chan *grch = chan->engdata[PSCNV_ENGINE_GRAPH];
415 int i, ret;
416 uint32_t *grctx;
418 if (graph->grctx_initvals)
419 return 0;
420 NV_INFO(dev, "PGRAPH: generating default grctx\n");
422 grctx = kzalloc(graph->grctx_size, GFP_KERNEL);
423 if (!grctx)
424 return -ENOMEM;
426 #ifdef USE_BLOB_UCODE
427 nvc0_graph_load_ctx(dev, chan->bo);
428 nv_wv32(grch->grctx, 0x1c, 1);
429 nv_wv32(grch->grctx, 0x20, 0);
430 dev_priv->vm->bar_flush(dev);
431 nv_wv32(grch->grctx, 0x28, 0);
432 nv_wv32(grch->grctx, 0x2c, 0);
433 dev_priv->vm->bar_flush(dev);
434 #else
435 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0x80000000);
436 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0x80000000 | chan->bo->start >> 12);
437 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x00000001);
438 if (!nv_wait(dev, CTXCTL(CC_SCRATCH(0)), 0x80000000, 0x80000000)) {
439 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
440 nvc0_graph_ctxctl_debug(dev);
441 ret = -EBUSY;
442 goto err;
444 #endif
446 ret = nvc0_grctx_construct(dev, graph, chan);
447 if (ret)
448 goto err;
450 #ifdef USE_BLOB_UCODE
451 ret = nvc0_graph_store_ctx(dev);
452 if (ret)
453 goto err;
454 #else
455 nv_wr32(dev, CTXCTL(CC_SCRATCH_CLEAR(0)), 0x80000000);
456 nv_wr32(dev, CTXCTL(WRCMD_DATA), 0x80000000 | chan->bo->start >> 12);
457 nv_wr32(dev, CTXCTL(WRCMD_CMD), 0x00000002);
458 if (!nv_wait(dev, CTXCTL(CC_SCRATCH(0)), 0x80000000, 0x80000000)) {
459 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
460 nvc0_graph_ctxctl_debug(dev);
461 ret = -EBUSY;
462 goto err;
464 #endif
466 for (i = 0; i < graph->grctx_size / 4; ++i)
467 grctx[i] = nv_rv32(grch->grctx, i * 4);
469 graph->grctx_initvals = grctx;
471 #ifdef USE_BLOB_CODE
472 nv_wr32(dev, 0x104048, nv_rd32(dev, 0x104048) | 3);
473 nv_wr32(dev, 0x105048, nv_rd32(dev, 0x105048) | 3);
475 nv_wv32(grch->grctx, 0xf4, 0);
476 nv_wv32(grch->grctx, 0xf8, 0);
477 nv_wv32(grch->grctx, 0x10, 0); /* mmio list size */
478 nv_wv32(grch->grctx, 0x14, 0); /* mmio list */
479 nv_wv32(grch->grctx, 0x18, 0);
480 nv_wv32(grch->grctx, 0x1c, 1);
481 nv_wv32(grch->grctx, 0x20, 0);
482 nv_wv32(grch->grctx, 0x28, 0);
483 nv_wv32(grch->grctx, 0x2c, 0);
484 dev_priv->vm->bar_flush(dev);
485 #endif
487 return 0;
489 err:
490 kfree(grctx);
491 return ret;
494 void
495 nvc0_graph_takedown(struct pscnv_engine *eng)
497 struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
499 nouveau_irq_unregister(eng->dev, 12);
501 pscnv_mem_free(graph->obj19848);
502 pscnv_mem_free(graph->obj0800c);
503 pscnv_mem_free(graph->obj08004);
504 pscnv_mem_free(graph->obj188b8);
505 pscnv_mem_free(graph->obj188b4);
507 if (graph->grctx_initvals)
508 kfree(graph->grctx_initvals);
510 kfree(graph);
512 nv_wr32(eng->dev, NVC0_PGRAPH_TRAP_EN, 0);
513 nv_wr32(eng->dev, NVC0_PGRAPH_INTR_EN, 0);
517 nvc0_graph_init(struct drm_device *dev)
519 struct drm_nouveau_private *dev_priv = dev->dev_private;
520 struct pscnv_bo *vo;
521 int i, ret;
522 struct nvc0_graph_engine *res = kzalloc(sizeof *res, GFP_KERNEL);
524 if (!res) {
525 NV_ERROR(dev, "PGRAPH: Couldn't allocate engine!\n");
526 return -ENOMEM;
528 NV_INFO(dev, "PGRAPH: Initializing...\n");
530 dev_priv->engines[PSCNV_ENGINE_GRAPH] = &res->base;
531 res->base.dev = dev;
532 res->base.takedown = nvc0_graph_takedown;
533 res->base.chan_alloc = nvc0_graph_chan_alloc;
534 res->base.chan_kill = nvc0_graph_chan_kill;
535 res->base.chan_free = nvc0_graph_chan_free;
537 vo = pscnv_mem_alloc(dev, 0x1000, PSCNV_GEM_CONTIG, 0,
538 NVC0_PGRAPH_GPC_BROADCAST_FFB_UNK34_ADDR);
539 if (!vo)
540 return -ENOMEM;
541 ret = dev_priv->vm->map_kernel(vo);
542 if (ret)
543 return ret;
544 res->obj188b4 = vo; /* PGRAPH_GPC_BROADCAST_FFB_UNK32_ADDR */
546 vo = pscnv_mem_alloc(dev, 0x1000, PSCNV_GEM_CONTIG, 0,
547 NVC0_PGRAPH_GPC_BROADCAST_FFB_UNK38_ADDR);
548 if (!vo)
549 return -ENOMEM;
550 ret = dev_priv->vm->map_kernel(vo);
551 if (ret)
552 return ret;
553 res->obj188b8 = vo; /* PGRAPH_GPC_BROADCAST_FFB_UNK38_ADDR */
555 for (i = 0; i < 0x1000; i += 4) {
556 nv_wv32(res->obj188b4, i, 0x10);
557 nv_wv32(res->obj188b8, i, 0x10);
559 dev_priv->vm->bar_flush(dev);
561 vo = pscnv_mem_alloc(dev, 0x2000, PSCNV_GEM_CONTIG | PSCNV_GEM_NOUSER, 0,
562 NVC0_PGRAPH_CCACHE_HUB2GPC_ADDR);
563 if (!vo)
564 return -ENOMEM;
565 ret = dev_priv->vm->map_kernel(vo);
566 if (ret)
567 return ret;
568 res->obj08004 = vo; /* PGRAPH_CCACHE_HUB2GPC_ADDR */
570 vo = pscnv_mem_alloc(dev, 0x8000, PSCNV_GEM_CONTIG | PSCNV_GEM_NOUSER, 0,
571 NVC0_PGRAPH_CCACHE_HUB2ESETUP_ADDR);
572 if (!vo)
573 return -ENOMEM;
574 ret = dev_priv->vm->map_kernel(vo);
575 if (ret)
576 return ret;
577 res->obj0800c = vo; /* PGRAPH_CCACHE_HUB2ESETUP_ADDR */
579 vo = pscnv_mem_alloc(dev, 3 << 17, PSCNV_GEM_CONTIG, 0,
580 GPC_BC(TP_BROADCAST_POLY_POLY2ESETUP));
581 if (!vo)
582 return -ENOMEM;
583 ret = dev_priv->vm->map_kernel(vo);
584 if (ret)
585 return ret;
586 res->obj19848 = vo;
588 nv_wr32(dev, NVC0_PGRAPH_FIFO_CONTROL,
589 nv_rd32(dev, NVC0_PGRAPH_FIFO_CONTROL) & ~0x00010001);
591 nvc0_graph_init_reset(dev);
593 res->gpc_count = nv_rd32(dev, CTXCTL(UNITS)) & 0x1f;
594 res->ropc_count = nv_rd32(dev, CTXCTL(UNITS)) >> 16;
596 nv_wr32(dev, NVC0_PGRAPH_GPC_BROADCAST_FFB, 0x00000000);
597 nv_wr32(dev, 0x4188a4, 0x00000000); /* ??? */
598 for (i = 0; i < 4; ++i)
599 nv_wr32(dev, 0x418888 + i * 4, 0x00000000); /* ??? */
601 nv_wr32(dev, NVC0_PGRAPH_GPC_BROADCAST_FFB_UNK34_ADDR,
602 res->obj188b4->start >> 8);
603 nv_wr32(dev, NVC0_PGRAPH_GPC_BROADCAST_FFB_UNK38_ADDR,
604 res->obj188b4->start >> 8);
606 nvc0_graph_init_regs(dev);
608 nv_wr32(dev, NVC0_PGRAPH_FIFO_CONTROL,
609 NVC0_PGRAPH_FIFO_CONTROL_UNK16 | NVC0_PGRAPH_FIFO_CONTROL_PULL);
611 nv_wr32(dev, NVC0_PGRAPH_INTR, 0xffffffff);
612 nv_wr32(dev, NVC0_PGRAPH_INTR_EN, 0xffffffff);
614 nvc0_graph_init_units(dev);
615 nvc0_graph_init_gpc(dev, res);
616 nvc0_graph_init_ropc(dev, res);
617 nvc0_graph_init_intr(dev);
619 ret = nvc0_graph_init_ctxctl(dev, res);
620 if (ret)
621 return ret;
623 nouveau_irq_register(dev, 12, nvc0_graph_irq_handler);
625 /*XXX: these need figuring out... */
626 switch (dev_priv->chipset) {
627 case 0xc0:
628 if (res->tp_count == 11) /* 465, 3/4/4/0, 4 */
629 res->magic_val = 0x07;
630 else if (res->tp_count == 14) /* 470, 3/3/4/4, 5 */
631 res->magic_val = 0x05;
632 else if (res->tp_count == 15) /* 480, 3/4/4/4, 6 */
633 res->magic_val = 0x06;
634 break;
635 case 0xc3: /* 450, 4/0/0/0, 2 */
636 res->magic_val = 0x03;
637 break;
638 case 0xc4: /* 460, 3/4/0/0, 4 */
639 res->magic_val = 0x01;
640 break;
641 case 0xc1: /* 2/0/0/0, 1 */
642 res->magic_val = 0x01;
643 break;
644 case 0xc8: /* 4/4/3/4, 5 */
645 res->magic_val = 0x06;
646 break;
647 case 0xce: /* 4/4/0/0, 4 */
648 res->magic_val = 0x03;
649 break;
650 case 0xcf: /* 4/0/0/0, 3 */
651 res->magic_val = 0x03;
652 break;
653 case 0xd9: /* 1/0/0/0, 1 */
654 res->magic_val = 0x01;
655 break;
658 if (!res->magic_val) {
659 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
660 res->gpc_tp_count[0], res->gpc_tp_count[1],
661 res->gpc_tp_count[2], res->gpc_tp_count[3], res->ropc_count);
662 /* use 0xc3's values... */
663 res->magic_val = 0x03;
666 return 0;
669 /* list of PGRAPH writes put in grctx+0x14, count of writes grctx+0x10 */
670 static int
671 nvc0_graph_create_context_mmio_list(struct pscnv_vspace *vs,
672 struct nvc0_graph_engine *graph)
674 struct drm_device *dev = vs->dev;
675 struct drm_nouveau_private *dev_priv = dev->dev_private;
676 struct pscnv_bo *vo;
677 int i = 0, gpc, tp, ret;
678 u32 magic;
680 vo = pscnv_mem_alloc(vs->dev, 0x1000, PSCNV_GEM_CONTIG, 0, 0x33101157);
681 if (!vo)
682 return -ENOMEM;
683 nvc0_vs(vs)->mmio_bo = vo;
685 ret = dev_priv->vm->map_kernel(nvc0_vs(vs)->mmio_bo);
686 if (ret)
687 return ret;
689 ret = pscnv_vspace_map(vs, vo, 0x1000, (1ULL << 40) - 1, 0,
690 &nvc0_vs(vs)->mmio_vm);
691 if (ret)
692 return ret;
694 i = 0;
695 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_CCACHE_HUB2GPC_ADDR);
696 nv_wv32(vo, i++ * 4, nvc0_vs(vs)->obj08004->start >> 8);
698 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_CCACHE_HUB2GPC_CONF);
699 nv_wv32(vo, i++ * 4, 0x80000018);
701 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_CCACHE_HUB2ESETUP_ADDR);
702 nv_wv32(vo, i++ * 4, nvc0_vs(vs)->obj0800c->start >> 8);
704 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_CCACHE_HUB2ESETUP_CONF);
705 nv_wv32(vo, i++ * 4, 0x80000000);
707 nv_wv32(vo, i++ * 4, GPC_BC(ESETUP_POLY2ESETUP));
708 nv_wv32(vo, i++ * 4, (8 << 28) | (nvc0_vs(vs)->obj19848->start >> 12));
710 nv_wv32(vo, i++ * 4, GPC_BC(TP_BROADCAST_POLY_POLY2ESETUP));
711 nv_wv32(vo, i++ * 4, (1 << 28) | (nvc0_vs(vs)->obj19848->start >> 12));
713 nv_wv32(vo, i++ * 4, GPC_BC(CCACHE_HUB2GPC_ADDR));
714 nv_wv32(vo, i++ * 4, nvc0_vs(vs)->obj0800c->start >> 8);
716 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_GPC_BROADCAST_CCACHE_HUB2GPC_CONF);
717 nv_wv32(vo, i++ * 4, 0);
719 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_GPC_BROADCAST_ESETUP_HUB2ESETUP_ADDR);
720 nv_wv32(vo, i++ * 4, nvc0_vs(vs)->obj08004->start >> 8);
722 nv_wv32(vo, i++ * 4, NVC0_PGRAPH_GPC_BROADCAST_ESETUP_HUB2ESETUP_CONF);
723 nv_wv32(vo, i++ * 4, 0x80000018);
725 magic = 0x02180000;
726 if (dev_priv->chipset != 0xc1) {
727 nv_wv32(vo, i++ * 4, 0x00405830);
728 nv_wv32(vo, i++ * 4, magic);
729 for (gpc = 0; gpc < graph->gpc_count; gpc++) {
730 for (tp = 0; tp < graph->gpc_tp_count[gpc]; tp++, magic += 0x0324) {
731 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
732 nv_wv32(vo, i++ * 4, reg);
733 nv_wv32(vo, i++ * 4, magic);
737 else {
738 nv_wv32(vo, i++ * 4, 0x00405830);
739 nv_wv32(vo, i++ * 4, magic | 0x0000218);
740 nv_wv32(vo, i++ * 4, 0x004064c4);
741 nv_wv32(vo, i++ * 4, 0x0086ffff);
742 for (gpc = 0; gpc < graph->gpc_count; gpc++) {
743 for (tp = 0; tp < graph->gpc_tp_count[gpc]; tp++) {
744 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
745 nv_wv32(vo, i++ * 4, reg);
746 nv_wv32(vo, i++ * 4, (1 << 28) | magic);
747 magic += 0x0324;
749 for (tp = 0; tp < graph->gpc_tp_count[gpc]; tp++) {
750 u32 reg = 0x504544 + (gpc * 0x8000) + (tp * 0x0800);
751 nv_wv32(vo, i++ * 4, reg);
752 nv_wv32(vo, i++ * 4, magic);
753 magic += 0x0324;
758 nvc0_vs(vs)->mmio_count = i / 2;
760 return 0;
764 nvc0_graph_chan_alloc(struct pscnv_engine *eng, struct pscnv_chan *chan)
766 struct drm_device *dev = eng->dev;
767 struct drm_nouveau_private *dev_priv = dev->dev_private;
768 struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
769 struct nvc0_graph_chan *grch = kzalloc(sizeof *grch, GFP_KERNEL);
770 int i, ret;
772 if (!grch) {
773 NV_ERROR(dev, "PGRAPH: Couldn't allocate channel !\n");
774 return -ENOMEM;
777 grch->grctx = pscnv_mem_alloc(dev, graph->grctx_size,
778 PSCNV_GEM_CONTIG | PSCNV_GEM_NOUSER,
779 0, 0x93ac0747);
780 if (!grch->grctx)
781 return -ENOMEM;
783 ret = dev_priv->vm->map_kernel(grch->grctx);
784 if (ret)
785 goto err;
787 ret = pscnv_vspace_map(chan->vspace,
788 grch->grctx, 0x1000, (1ULL << 40) - 1,
789 0, &grch->grctx_vm);
790 if (ret)
791 goto err;
793 nv_wv32(chan->bo, 0x210, grch->grctx_vm->start | 4);
794 nv_wv32(chan->bo, 0x214, grch->grctx_vm->start >> 32);
795 dev_priv->vm->bar_flush(dev);
797 if (!nvc0_vs(chan->vspace)->obj08004) {
798 ret = pscnv_vspace_map(chan->vspace, graph->obj08004,
799 0x1000, (1ULL << 40) - 1, 0,
800 &nvc0_vs(chan->vspace)->obj08004);
801 if (ret)
802 goto err;
804 ret = pscnv_vspace_map(chan->vspace, graph->obj0800c,
805 0x1000, (1ULL << 40) - 1, 0,
806 &nvc0_vs(chan->vspace)->obj0800c);
807 if (ret)
808 goto err;
810 ret = pscnv_vspace_map(chan->vspace, graph->obj19848,
811 0x1000, (1ULL << 40) - 1, 0,
812 &nvc0_vs(chan->vspace)->obj19848);
813 if (ret)
814 goto err;
817 chan->engdata[PSCNV_ENGINE_GRAPH] = grch;
819 if (!nvc0_vs(chan->vspace)->mmio_bo) {
820 ret = nvc0_graph_create_context_mmio_list(chan->vspace, graph);
821 if (ret)
822 goto err;
825 if (!graph->grctx_initvals)
826 return nvc0_graph_generate_context(dev, graph, chan);
828 /* fill in context values generated for 1st context */
829 for (i = 0; i < graph->grctx_size / 4; ++i)
830 nv_wv32(grch->grctx, i * 4, graph->grctx_initvals[i]);
832 #ifdef USE_BLOB_UCODE
833 nv_wv32(grch->grctx, 0xf4, 0);
834 nv_wv32(grch->grctx, 0xf8, 0);
835 nv_wv32(grch->grctx, 0x10, nvc0_vs(chan->vspace)->mmio_count);
836 nv_wv32(grch->grctx, 0x14, nvc0_vs(chan->vspace)->mmio_vm->start);
837 nv_wv32(grch->grctx, 0x18, nvc0_vs(chan->vspace)->mmio_vm->start >> 32);
838 nv_wv32(grch->grctx, 0x1c, 1);
839 nv_wv32(grch->grctx, 0x20, 0);
840 nv_wv32(grch->grctx, 0x28, 0);
841 nv_wv32(grch->grctx, 0x2c, 0);
842 #else
843 nv_wv32(grch->grctx, 0x00, nvc0_vs(chan->vspace)->mmio_count);
844 nv_wv32(grch->grctx, 0x04, nvc0_vs(chan->vspace)->mmio_vm->start >> 8);
845 #endif
846 dev_priv->vm->bar_flush(dev);
848 return 0;
850 err:
851 pscnv_mem_free(grch->grctx);
852 return ret;
void
nvc0_graph_chan_kill(struct pscnv_engine *eng, struct pscnv_chan *ch)
{
	/* FIXME: not implemented — the channel's context is not forcibly
	 * evicted from PGRAPH. */
}
861 void
862 nvc0_graph_chan_free(struct pscnv_engine *eng, struct pscnv_chan *ch)
864 struct nvc0_graph_chan *grch = ch->engdata[PSCNV_ENGINE_GRAPH];
865 pscnv_vspace_unmap_node(grch->grctx_vm);
866 pscnv_mem_free(grch->grctx);
868 kfree(grch);
869 ch->engdata[PSCNV_ENGINE_GRAPH] = NULL;
872 /* IRQ Handler */
/* value -> name mapping used to decode hardware error codes in IRQ
 * messages; tables are terminated by an entry with a NULL name. */
struct pscnv_enum {
	int value;
	const char *name;
	void *data;
};
880 static const struct pscnv_enum dispatch_errors[] = {
881 { 3, "INVALID_QUERY_OR_TEXTURE", 0 },
882 { 4, "INVALID_VALUE", 0 },
883 { 5, "INVALID_ENUM", 0 },
885 { 8, "INVALID_OBJECT", 0 },
887 { 0xb, "INVALID_ADDRESS_ALIGNMENT", 0 },
888 { 0xc, "INVALID_BITFIELD", 0 },
890 { 0x10, "RT_DOUBLE_BIND", 0 },
891 { 0x11, "RT_TYPES_MISMATCH", 0 },
892 { 0x12, "RT_LINEAR_WITH_ZETA", 0 },
894 { 0x1b, "SAMPLER_OVER_LIMIT", 0 },
895 { 0x1c, "TEXTURE_OVER_LIMIT", 0 },
897 { 0x21, "Z_OUT_OF_BOUNDS", 0 },
899 { 0x23, "M2MF_OUT_OF_BOUNDS", 0 },
901 { 0x27, "CP_MORE_PARAMS_THAN_SHARED", 0 },
902 { 0x28, "CP_NO_REG_SPACE_STRIPED", 0 },
903 { 0x29, "CP_NO_REG_SPACE_PACKED", 0 },
904 { 0x2a, "CP_NOT_ENOUGH_WARPS", 0 },
905 { 0x2b, "CP_BLOCK_SIZE_MISMATCH", 0 },
906 { 0x2c, "CP_NOT_ENOUGH_LOCAL_WARPS", 0 },
907 { 0x2d, "CP_NOT_ENOUGH_STACK_WARPS", 0 },
908 { 0x2e, "CP_NO_BLOCKDIM_LATCH", 0 },
910 { 0x31, "ENG2D_FORMAT_MISMATCH", 0 },
912 { 0x47, "VP_CLIP_OVER_LIMIT", 0 },
914 { 0, NULL, 0 },
917 static const struct pscnv_enum *
918 pscnv_enum_find(const struct pscnv_enum *list, int val)
920 for (; list->value != val && list->name; ++list);
921 return list->name ? list : NULL;
924 static void
925 nvc0_graph_trap_handler(struct drm_device *dev, int cid)
927 uint32_t status = nv_rd32(dev, NVC0_PGRAPH_TRAP);
928 uint32_t ustatus;
930 if (status & NVC0_PGRAPH_TRAP_DISPATCH) {
931 ustatus = nv_rd32(dev, NVC0_PGRAPH_DISPATCH_TRAP) & 0x7fffffff;
932 if (ustatus & 0x00000001) {
933 NV_ERROR(dev, "PGRAPH_TRAP_DISPATCH: ch %d\n", cid);
935 if (ustatus & 0x00000002) {
936 NV_ERROR(dev, "PGRAPH_TRAP_QUERY: ch %d\n", cid);
938 ustatus &= ~0x00000003;
939 if (ustatus)
940 NV_ERROR(dev, "PGRAPH_TRAP_DISPATCH: unknown ustatus "
941 "%08x on ch %d\n", ustatus, cid);
943 nv_wr32(dev, NVC0_PGRAPH_DISPATCH_TRAP, __TRAP_CLEAR_AND_ENABLE);
944 nv_wr32(dev, NVC0_PGRAPH_TRAP, NVC0_PGRAPH_TRAP_DISPATCH);
945 status &= ~NVC0_PGRAPH_TRAP_DISPATCH;
948 if (status & NVC0_PGRAPH_TRAP_M2MF) {
949 ustatus = nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP) & 0x7fffffff;
950 if (ustatus & 1)
951 NV_ERROR(dev, "PGRAPH_TRAP_M2MF_NOTIFY: ch %d "
952 "%08x %08x %08x %08x\n", cid,
953 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x04),
954 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x08),
955 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x0c),
956 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x10));
957 if (ustatus & 2)
958 NV_ERROR(dev, "PGRAPH_TRAP_M2MF_IN: ch %d "
959 "%08x %08x %08x %08x\n", cid,
960 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x04),
961 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x08),
962 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x0c),
963 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x10));
964 if (ustatus & 4)
965 NV_ERROR(dev, "PGRAPH_TRAP_M2MF_OUT: ch %d "
966 "%08x %08x %08x %08x\n", cid,
967 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x04),
968 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x08),
969 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x0c),
970 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP + 0x10));
971 ustatus &= ~0x00000007;
972 if (ustatus)
973 NV_ERROR(dev, "PGRAPH_TRAP_M2MF: unknown ustatus %08x "
974 "on ch %d\n", cid, ustatus);
975 nv_wr32(dev, NVC0_PGRAPH_M2MF_TRAP, __TRAP_CLEAR_AND_ENABLE);
976 nv_wr32(dev, NVC0_PGRAPH_TRAP, NVC0_PGRAPH_TRAP_M2MF);
977 status &= ~NVC0_PGRAPH_TRAP_M2MF;
980 if (status & NVC0_PGRAPH_TRAP_UNK4) {
981 ustatus = nv_rd32(dev, NVC0_PGRAPH_UNK5800_TRAP);
982 if (ustatus & (1 << 24))
983 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: VPA fail\n");
984 if (ustatus & (1 << 25))
985 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: VPB fail\n");
986 if (ustatus & (1 << 26))
987 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: TCP fail\n");
988 if (ustatus & (1 << 27))
989 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: TEP fail\n");
990 if (ustatus & (1 << 28))
991 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: GP fail\n");
992 if (ustatus & (1 << 29))
993 NV_ERROR(dev, "PGRAPH_TRAP_SHADERS: FP fail\n");
994 NV_ERROR(dev, "PGRAPH_TRAP_SHDERS: ustatus = %08x\n", ustatus);
995 nv_wr32(dev, NVC0_PGRAPH_UNK5800_TRAP, __TRAP_CLEAR_AND_ENABLE);
996 nv_wr32(dev, NVC0_PGRAPH_TRAP, NVC0_PGRAPH_TRAP_UNK4);
997 status &= ~NVC0_PGRAPH_TRAP_UNK4;
	/* Macro (method expander) traps. */
	if (status & NVC0_PGRAPH_TRAP_MACRO) {
		/* Bit 31 masked off; the remaining bits identify the fault. */
		ustatus = nv_rd32(dev, NVC0_PGRAPH_MACRO_TRAP) & 0x7fffffff;
		/*
		 * NOTE(review): 0x404424 presumably latches data about the
		 * faulting macro; it is re-read per fault bit — TODO confirm
		 * against PGRAPH register docs before symbolizing it.
		 */
		if (ustatus & NVC0_PGRAPH_MACRO_TRAP_TOO_FEW_PARAMS)
			NV_ERROR(dev, "PGRAPH_TRAP_MACRO: TOO_FEW_PARAMS %08x\n",
				nv_rd32(dev, 0x404424));
		if (ustatus & NVC0_PGRAPH_MACRO_TRAP_TOO_MANY_PARAMS)
			NV_ERROR(dev, "PGRAPH_TRAP_MACRO: TOO_MANY_PARAMS %08x\n",
				nv_rd32(dev, 0x404424));
		if (ustatus & NVC0_PGRAPH_MACRO_TRAP_ILLEGAL_OPCODE)
			NV_ERROR(dev, "PGRAPH_TRAP_MACRO: ILLEGAL_OPCODE %08x\n",
				nv_rd32(dev, 0x404424));
		if (ustatus & NVC0_PGRAPH_MACRO_TRAP_DOUBLE_BRANCH)
			NV_ERROR(dev, "PGRAPH_TRAP_MACRO: DOUBLE_BRANCH %08x\n",
				nv_rd32(dev, 0x404424));
		/* The four known faults occupy the low nibble; anything left
		 * over is a fault we cannot decode yet. */
		ustatus &= ~0xf;
		if (ustatus)
			NV_ERROR(dev, "PGRAPH_TRAP_MACRO: unknown ustatus %08x\n", ustatus);
		/* Clear and re-arm the macro trap source, then ack the trap. */
		nv_wr32(dev, NVC0_PGRAPH_MACRO_TRAP, __TRAP_CLEAR_AND_ENABLE);
		nv_wr32(dev, NVC0_PGRAPH_TRAP, NVC0_PGRAPH_TRAP_MACRO);
		status &= ~NVC0_PGRAPH_TRAP_MACRO;
	}
1022 if (status) {
1023 NV_ERROR(dev, "PGRAPH: unknown trap %08x on ch %d\n", status, cid);
1024 NV_INFO(dev,
1025 "DISPATCH_TRAP = %08x\n"
1026 "M2MF_TRAP = %08x\n"
1027 "CCACHE_TRAP = %08x\n"
1028 "UNK6000_TRAP_UNK0 = %08x\n"
1029 "UNK6000_TRAP_UNK1 = %08x\n"
1030 "MACRO_TRAP = %08x\n"
1031 "UNK5800_TRAP = %08x\n",
1032 nv_rd32(dev, NVC0_PGRAPH_DISPATCH_TRAP),
1033 nv_rd32(dev, NVC0_PGRAPH_M2MF_TRAP),
1034 nv_rd32(dev, NVC0_PGRAPH_CCACHE_TRAP),
1035 nv_rd32(dev, NVC0_PGRAPH_UNK6000_TRAP_UNK0),
1036 nv_rd32(dev, NVC0_PGRAPH_UNK6000_TRAP_UNK1),
1037 nv_rd32(dev, NVC0_PGRAPH_MACRO_TRAP),
1038 nv_rd32(dev, NVC0_PGRAPH_UNK5800_TRAP));
1040 nv_wr32(dev, NVC0_PGRAPH_TRAP, status);
1044 void nvc0_graph_irq_handler(struct drm_device *dev, int irq)
1046 struct drm_nouveau_private *dev_priv = dev->dev_private;
1047 struct nvc0_graph_engine *graph;
1048 uint32_t status;
1049 uint32_t pgraph, addr, datal, datah, ecode, grcl, subc, mthd;
1050 int cid;
1051 #define PGRAPH_ERROR(name) \
1052 NV_ERROR(dev, "%s: st %08x ch %d sub %d [%04x] mthd %04x data %08x%08x\n", \
1053 name, pgraph, cid, subc, grcl, mthd, datah, datal);
1055 graph = NVC0_GRAPH(dev_priv->engines[PSCNV_ENGINE_GRAPH]);
1057 status = nv_rd32(dev, NVC0_PGRAPH_INTR);
1058 ecode = nv_rd32(dev, NVC0_PGRAPH_DATA_ERROR);
1059 pgraph = nv_rd32(dev, NVC0_PGRAPH_STATUS);
1060 addr = nv_rd32(dev, NVC0_PGRAPH_TRAPPED_ADDR);
1061 mthd = addr & NVC0_PGRAPH_TRAPPED_ADDR_MTHD__MASK;
1062 subc = (addr & NVC0_PGRAPH_TRAPPED_ADDR_SUBCH__MASK) >>
1063 NVC0_PGRAPH_TRAPPED_ADDR_SUBCH__SHIFT;
1064 datal = nv_rd32(dev, NVC0_PGRAPH_TRAPPED_DATA_LOW);
1065 datah = nv_rd32(dev, NVC0_PGRAPH_TRAPPED_DATA_HIGH);
1066 grcl = nv_rd32(dev, NVC0_PGRAPH_DISPATCH_CTX_SWITCH) & 0xffff;
1067 cid = -1;
1069 if (status & NVC0_PGRAPH_INTR_NOTIFY) {
1070 PGRAPH_ERROR("PGRAPH_NOTIFY");
1071 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_NOTIFY);
1072 status &= ~NVC0_PGRAPH_INTR_NOTIFY;
1074 if (status & NVC0_PGRAPH_INTR_QUERY) {
1075 PGRAPH_ERROR("PGRAPH_QUERY");
1076 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_QUERY);
1077 status &= ~NVC0_PGRAPH_INTR_QUERY;
1079 if (status & NVC0_PGRAPH_INTR_SYNC) {
1080 PGRAPH_ERROR("PGRAPH_SYNC");
1081 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_SYNC);
1082 status &= ~NVC0_PGRAPH_INTR_SYNC;
1084 if (status & NVC0_PGRAPH_INTR_ILLEGAL_MTHD) {
1085 PGRAPH_ERROR("PGRAPH_ILLEGAL_MTHD");
1086 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_ILLEGAL_MTHD);
1087 status &= ~NVC0_PGRAPH_INTR_ILLEGAL_MTHD;
1089 if (status & NVC0_PGRAPH_INTR_ILLEGAL_CLASS) {
1090 PGRAPH_ERROR("PGRAPH_ILLEGAL_CLASS");
1091 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_ILLEGAL_CLASS);
1092 status &= ~NVC0_PGRAPH_INTR_ILLEGAL_CLASS;
1094 if (status & NVC0_PGRAPH_INTR_DOUBLE_NOTIFY) {
1095 PGRAPH_ERROR("PGRAPH_DOUBLE_NOITFY");
1096 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_DOUBLE_NOTIFY);
1097 status &= ~NVC0_PGRAPH_INTR_DOUBLE_NOTIFY;
1099 if (status & NVC0_PGRAPH_INTR_UNK7) {
1100 PGRAPH_ERROR("PGRAPH_UNK7");
1101 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_UNK7);
1102 status &= ~NVC0_PGRAPH_INTR_UNK7;
1104 if (status & NVC0_PGRAPH_INTR_FIRMWARE_MTHD) {
1105 PGRAPH_ERROR("PGRAPH_FIRMWARE_MTHD");
1106 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_FIRMWARE_MTHD);
1107 status &= ~NVC0_PGRAPH_INTR_FIRMWARE_MTHD;
1109 if (status & NVC0_PGRAPH_INTR_BUFFER_NOTIFY) {
1110 PGRAPH_ERROR("PGRAPH_BUFFER_NOTIFY");
1111 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_BUFFER_NOTIFY);
1112 status &= ~NVC0_PGRAPH_INTR_BUFFER_NOTIFY;
1114 if (status & NVC0_PGRAPH_INTR_CTXCTL_UP) {
1115 PGRAPH_ERROR("PGRAPH_CTXCTL_UP");
1116 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_CTXCTL_UP);
1117 status &= ~NVC0_PGRAPH_INTR_CTXCTL_UP;
1119 if (status & NVC0_PGRAPH_INTR_DATA_ERROR) {
1120 const struct pscnv_enum *ev;
1121 ev = pscnv_enum_find(dispatch_errors, ecode);
1122 if (ev) {
1123 NV_ERROR(dev, "PGRAPH_DATA_ERROR [%s]", ev->name);
1124 PGRAPH_ERROR("");
1125 } else {
1126 NV_ERROR(dev, "PGRAPH_DATA_ERROR [%x]", ecode);
1128 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_DATA_ERROR);
1129 status &= ~NVC0_PGRAPH_INTR_DATA_ERROR;
1131 if (status & NVC0_PGRAPH_INTR_TRAP) {
1132 nvc0_graph_trap_handler(dev, cid);
1133 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_TRAP);
1134 status &= ~NVC0_PGRAPH_INTR_TRAP;
1136 if (status & NVC0_PGRAPH_INTR_SINGLE_STEP) {
1137 PGRAPH_ERROR("PGRAPH_SINGLE_STEP");
1138 nv_wr32(dev, NVC0_PGRAPH_INTR, NVC0_PGRAPH_INTR_SINGLE_STEP);
1139 status &= ~NVC0_PGRAPH_INTR_SINGLE_STEP;
1141 if (status) {
1142 NV_ERROR(dev, "Unknown PGRAPH interrupt(s) %08x\n", status);
1143 PGRAPH_ERROR("PGRAPH");
1144 nv_wr32(dev, NVC0_PGRAPH_INTR, status);
1147 nv_wr32(dev, NVC0_PGRAPH_FIFO_CONTROL, (1 << 16) | 1);