/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv50.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
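
/* Register 0x001540 holds the unit configuration on these chips: judging by
 * the trap handlers below, the low 16 bits flag the enabled TPs and bits
 * 24+ the MPs inside a TP.  The ->units hook simply exposes this mask.
 */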
u64
nv50_gr_units(struct nvkm_gr *gr)
{
	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
}
/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/
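
/* Software objects only need a tiny 16-byte instance header: the bind hook
 * below writes the object's class id at 0x00 and zeroes the remaining
 * three words.
 */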
static int
nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
				  align, false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

const struct nvkm_object_func
nv50_gr_object = {
	.bind = nv50_gr_object_bind,
};
/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/
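
/* Channel (context) objects: binding one allocates a gpuobj of gr->size
 * bytes (the size reported by nv50_grctx_init() during engine init) and
 * fills it with the default context image via nv50_grctx_fill().
 */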
static int
nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

static const struct nvkm_object_func
nv50_gr_chan = {
	.bind = nv50_gr_chan_bind,
};
int
nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nv50_gr_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	*pobject = &chan->object;
	return 0;
}
/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
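
/* Decode tables for the various PGRAPH status registers.  They are only
 * used by the interrupt/trap handlers below, through nvkm_snprintbf() and
 * nvkm_enum_find(), to turn raw status bits into readable names.
 */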
static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};
/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};
static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};
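
/* Note on addressing in the TP trap helpers below: pre-NVA0 chipsets space
 * the per-TP register blocks 0x1000 apart (i << 12), NVA0 and newer use a
 * 0x800 stride (i << 11).
 */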
static void
nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
	char msg[128];

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
				   tp, e14, e10 | ((e18 >> 24) & 0x1f));
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				   tp, e14, e10 | ((e18 >> 7) & 0x1f));
		} else {
			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				   tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
				   "Address %02x%08x\n",
			   tp, ustatus, msg, e14, e10);
	}
	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		   tp, e0c, e18, e1c, e20, e24);
}
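
/* Each TP appears to contain up to four MPs; bits 24..27 of 0x001540 tell
 * which of them are present.
 */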
static void
nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	char msg[128];
	int mps = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i + 24)))
			continue;
		if (device->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nvkm_rd32(device, addr + 0x10);
		status = nvkm_rd32(device, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nvkm_rd32(device, addr + 0x20);
			pc = nvkm_rd32(device, addr + 0x24);
			oplow = nvkm_rd32(device, addr + 0x70);
			ophigh = nvkm_rd32(device, addr + 0x74);
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_mp_exec_errors, status);
			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
					   "%08x [%s] at %06x warp %d, "
					   "opcode %08x %08x\n",
				   tpid, i, status, msg, pc & 0xffffff,
				   pc >> 24, oplow, ophigh);
		}
		nvkm_wr32(device, addr + 0x10, mp10);
		nvkm_wr32(device, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
				   "No MPs claiming errors?\n", tpid);
}
static void
nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
		u32 ustatus_new, int display, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 units = nvkm_rd32(device, 0x1540);
	u32 ustatus_addr, ustatus;
	char msg[128];
	int tps = 0;
	int i, r;

	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (device->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nvkm_error(subdev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nvkm_error(subdev, "\t%08x: %08x\n", r,
						   nvkm_rd32(device, r));
				if (ustatus) {
					nvkm_snprintbf(msg, sizeof(msg),
						       nv50_tex_traps, ustatus);
					nvkm_error(subdev,
						   "%s - TP%d: %08x [%s]\n",
						   name, i, ustatus, msg);
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_gr_mp_trap(gr, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nvkm_snprintbf(msg, sizeof(msg),
					       nv50_mpc_traps, ustatus);
				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
					   name, i, ustatus, msg);
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_gr_prop_trap(gr, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus && display)
			nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
		nvkm_wr32(device, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
}
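
/* The main trap handler follows the same recipe for every unit: read the
 * unit's ustatus register, decode/print it when display is requested,
 * write 0xc0000000 to acknowledge, then clear the unit's bit in the master
 * trap status register at 0x400108.
 */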
static int
nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
		     int chid, u64 inst, const char *name)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 status = nvkm_rd32(device, 0x400108);
	u32 ustatus;
	char msg[128];

	if (!status && display) {
		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
		return 1;
	}
	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
		if (!ustatus && display)
			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");

		nvkm_wr32(device, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nvkm_rd32(device, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nvkm_rd32(device, 0x40080c);
			u32 datah = nvkm_rd32(device, 0x400810);
			u32 class = nvkm_rd32(device, 0x400814);
			u32 r848 = nvkm_rd32(device, 0x400848);

			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x%08x "
					   "400808 %08x 400848 %08x\n",
					   chid, inst, name, subc, class, mthd,
					   datah, datal, addr, r848);
			} else if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x400808, 0);
			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
			nvkm_wr32(device, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nvkm_rd32(device, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nvkm_rd32(device, 0x40085c);
			u32 class = nvkm_rd32(device, 0x400814);

			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nvkm_error(subdev,
					   "ch %d [%010llx %s] subc %d "
					   "class %04x mthd %04x data %08x "
					   "40084c %08x\n", chid, inst, name,
					   subc, class, mthd, data, addr);
			} else if (display) {
				nvkm_error(subdev, "no stuck command?\n");
			}

			nvkm_wr32(device, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display)
			nvkm_error(subdev, "TRAP_DISPATCH "
					   "(unknown %08x)\n", ustatus);

		nvkm_wr32(device, 0x400804, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x001);
		status &= ~0x001;
	}
	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_m2mf, ustatus);
			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x406804),
				   nvkm_rd32(device, 0x406808),
				   nvkm_rd32(device, 0x40680c),
				   nvkm_rd32(device, 0x406810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 2);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x406800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_vfetch, ustatus);
			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x400c00),
				   nvkm_rd32(device, 0x400c08),
				   nvkm_rd32(device, 0x400c0c),
				   nvkm_rd32(device, 0x400c10));
		}

		nvkm_wr32(device, 0x400c04, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_strmout, ustatus);
			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				   nvkm_rd32(device, 0x401804),
				   nvkm_rd32(device, 0x401808),
				   nvkm_rd32(device, 0x40180c),
				   nvkm_rd32(device, 0x401810));
		}

		/* No sane way found yet -- just reset the bugger. */
		nvkm_wr32(device, 0x400040, 0x80);
		nvkm_wr32(device, 0x400040, 0);
		nvkm_wr32(device, 0x401800, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
		if (display) {
			nvkm_snprintbf(msg, sizeof(msg),
				       nv50_gr_trap_ccache, ustatus);
			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
				   ustatus, msg);
			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
					   "%08x %08x %08x\n",
				   nvkm_rd32(device, 0x405000),
				   nvkm_rd32(device, 0x405004),
				   nvkm_rd32(device, 0x405008),
				   nvkm_rd32(device, 0x40500c),
				   nvkm_rd32(device, 0x405010),
				   nvkm_rd32(device, 0x405014),
				   nvkm_rd32(device, 0x40501c));
		}

		nvkm_wr32(device, 0x405018, 0xc0000000);
		nvkm_wr32(device, 0x400108, 0x010);
		status &= ~0x010;
	}
	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x020) {
		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
		if (display)
			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
		nvkm_wr32(device, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
				"TRAP_TEXTURE");
		nvkm_wr32(device, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
				"TRAP_MP");
		nvkm_wr32(device, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
				"TRAP_PROP");
		nvkm_wr32(device, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
		nvkm_wr32(device, 0x400108, status);
	}
	return 1;
}
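
/* Top-level PGRAPH interrupt handler: the offending channel is looked up
 * from the instance address in 0x40032c, DATA_ERROR and TRAP get decoded
 * via the helpers above, and the interrupt is acknowledged in 0x400100
 * before PGRAPH is re-enabled through 0x400500.
 */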
void
nv50_gr_intr(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fifo_chan *chan;
	u32 stat = nvkm_rd32(device, 0x400100);
	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
	u32 addr = nvkm_rd32(device, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, 0x400708);
	u32 class = nvkm_rd32(device, 0x400814);
	u32 show = stat, show_bitfield = stat;
	const struct nvkm_enum *en;
	unsigned long flags;
	const char *name = "unknown";
	char msg[128];
	int chid = -1;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	if (chan) {
		name = chan->object.client->name;
		chid = chan->chid;
	}

	if (show & 0x00100000) {
		u32 ecode = nvkm_rd32(device, 0x400110);
		en = nvkm_enum_find(nv50_data_error_names, ecode);
		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
			   ecode, en ? en->name : "");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nvkm_wr32(device, 0x400100, stat);
	nvkm_wr32(device, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   stat, msg, chid, (u64)inst << 12, name,
			   subc, class, mthd, data);
	}

	if (nvkm_rd32(device, 0x400824) & (1 << 31))
		nvkm_wr32(device, 0x400824,
			  nvkm_rd32(device, 0x400824) & ~(1 << 31));

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
int
nv50_gr_init(struct nvkm_gr *base)
{
	struct nv50_gr *gr = nv50_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret, units, i;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nvkm_wr32(device, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nvkm_wr32(device, 0x400804, 0xc0000000);
	nvkm_wr32(device, 0x406800, 0xc0000000);
	nvkm_wr32(device, 0x400c04, 0xc0000000);
	nvkm_wr32(device, 0x401800, 0xc0000000);
	nvkm_wr32(device, 0x405018, 0xc0000000);
	nvkm_wr32(device, 0x402000, 0xc0000000);

	units = nvkm_rd32(device, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (device->chipset < 0xa0) {
			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(device, &gr->size);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x400824, 0x00000000);
	nvkm_wr32(device, 0x400828, 0x00000000);
	nvkm_wr32(device, 0x40082c, 0x00000000);
	nvkm_wr32(device, 0x400830, 0x00000000);
	nvkm_wr32(device, 0x40032c, 0x00000000);
	nvkm_wr32(device, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (device->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nvkm_wr32(device, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (device->chipset == 0xa0 ||
		    device->chipset == 0xaa ||
		    device->chipset == 0xac) {
			nvkm_wr32(device, 0x402ca8, 0x00000802);
		} else {
			nvkm_wr32(device, 0x402cc0, 0x00000000);
			nvkm_wr32(device, 0x402ca8, 0x00000002);
		}
		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
	}

	return 0;
}
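
/* nv50_gr_new_() is the common constructor: it only allocates the nv50_gr
 * wrapper and passes the caller's nvkm_gr_func on to the base class, so it
 * can presumably be shared by the later Tesla-family variants with their
 * own function tables; nv50_gr_new() below instantiates the plain NV50
 * version.
 */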
int
nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     int index, struct nvkm_gr **pgr)
{
	struct nv50_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&gr->lock);
	*pgr = &gr->base;

	return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
}
static const struct nvkm_gr_func
nv50_gr = {
	.init = nv50_gr_init,
	.intr = nv50_gr_intr,
	.chan_new = nv50_gr_chan_new,
	.units = nv50_gr_units,
	.sclass = {
		{ -1, -1, 0x0030, &nv50_gr_object },
		{ -1, -1, 0x502d, &nv50_gr_object },
		{ -1, -1, 0x5039, &nv50_gr_object },
		{ -1, -1, 0x5097, &nv50_gr_object },
		{ -1, -1, 0x50c0, &nv50_gr_object },
		{}
	}
};
int
nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return nv50_gr_new_(&nv50_gr, device, index, pgr);
}