/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>

#include "nouveau_drv.h"
#include "nouveau_mm.h"

#include "nvc0_graph.h"
#include "nvc0_grhub.fuc.h"
#include "nvc0_grgpc.fuc.h"

static void
nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
		nv_rd32(dev, base + 0x400));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}

static void
nvc0_graph_ctxctl_debug(struct drm_device *dev)
{
	u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
	u32 gpc;

	nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
	for (gpc = 0; gpc < gpcnr; gpc++)
		nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
}

static int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003);
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	return 0;
}

static int
nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}

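/*
 * Build the initial "golden" context image: run the context generator
 * once against this channel, save the resulting image out of the
 * hardware and cache it in priv->grctx_vals so that later channels can
 * simply copy it.
 */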
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (!nouveau_ctxfw) {
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000001);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		nvc0_graph_load_context(chan);

		nv_wo32(grch->grctx, 0x1c, 1);
		nv_wo32(grch->grctx, 0x20, 0);
		nv_wo32(grch->grctx, 0x28, 0);
		nv_wo32(grch->grctx, 0x2c, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	ret = nvc0_grctx_generate(chan);
	if (ret)
		goto err;

	if (!nouveau_ctxfw) {
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
		if (ret)
			goto err;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;

err:
	kfree(ctx);
	return ret;
}

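/*
 * Build the per-channel mmio list: (register, value) pairs written into
 * grch->mmio, consumed when the channel's context is loaded to point the
 * relevant units at the per-channel buffers allocated here.
 */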
static int
nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int i = 0, gpc, tp, ret;
	u32 magic;

	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;

	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	magic = 0x02180000;
	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
	nv_wo32(grch->mmio, i++ * 4, magic);
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
			nv_wo32(grch->mmio, i++ * 4, reg);
			nv_wo32(grch->mmio, i++ * 4, magic);
		}
	}

	grch->mmio_nr = i / 2;
	return 0;
}

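/*
 * Per-channel context setup: allocate the graphics context buffer, link
 * it into the channel's instance memory, fill it from the cached golden
 * image and patch its header with this channel's mmio list.
 */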
static int
nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
	struct nvc0_graph_chan *grch;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!grch)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	pinstmem->flush(dev);

	if (!priv->grctx_vals) {
		ret = nvc0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);

	if (!nouveau_ctxfw) {
		nv_wo32(grctx, 0x00, grch->mmio_nr);
		nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
	} else {
		nv_wo32(grctx, 0xf4, 0);
		nv_wo32(grctx, 0xf8, 0);
		nv_wo32(grctx, 0x10, grch->mmio_nr);
		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x1c, 1);
		nv_wo32(grctx, 0x20, 0);
		nv_wo32(grctx, 0x28, 0);
		nv_wo32(grctx, 0x2c, 0);
	}
	pinstmem->flush(dev);
	return 0;

error:
	priv->base.context_del(chan, engine);
	return ret;
}

static void
nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
	nouveau_gpuobj_ref(NULL, &grch->unk408004);
	nouveau_gpuobj_ref(NULL, &grch->grctx);
	chan->engctx[engine] = NULL;
}

static int
nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	return 0;
}

static int
nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	return 0;
}

static int
nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
			  u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}

static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}

static void
nvc0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x00006fe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x013901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}

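/*
 * Distribute TPs across GPCs: pack a 4-bit per-TP index table and
 * broadcast it, then tell each GPC its own TP count, the total TP count
 * and the derived magicgpc918 ratio.
 */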
static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
	u32 data[TP_MAX / 8];
	u8  tpnr[GPC_MAX];
	int i, gpc, tpc;

	/* TP  ROP  UNKVAL(magic_not_rop_nr) */

	memset(data, 0x00, sizeof(data));
	memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
	for (i = 0, gpc = -1; i < priv->tp_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpnr[gpc]);
		tpc = priv->tp_nr[gpc] - tpnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						    priv->tp_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}

static void
nvc0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409c24, 0x000f0000);
	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x40601c, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);
	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}

static void
nvc0_graph_init_gpc_1(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc, tp;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}

static void
nvc0_graph_init_rop(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int rop;

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
}

static void
nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
		    struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
	int i;

	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);

	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
	}
}

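/*
 * Upload and start the context-switch microcode.  Without the ctxfw
 * module option the built-in nvc0_grhub/nvc0_grgpc ucode is loaded and
 * the HUB initialises the GPCs itself; with it, the external fuc409/
 * fuc41a firmware images are uploaded and queried for the context size.
 */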
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 r000260;
	int i;

	if (!nouveau_ctxfw) {
		/* load HUB microcode */
		r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
		nv_wr32(dev, 0x4091c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
			nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);

		nv_wr32(dev, 0x409180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x409188, i >> 6);
			nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
		}

		/* load GPC microcode */
		nv_wr32(dev, 0x41a1c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
			nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);

		nv_wr32(dev, 0x41a180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x41a188, i >> 6);
			nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
		}
		nv_wr32(dev, 0x000260, r000260);

		/* start HUB ucode running, it'll init the GPCs */
		nv_wr32(dev, 0x409800, dev_priv->chipset);
		nv_wr32(dev, 0x40910c, 0x00000000);
		nv_wr32(dev, 0x409100, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
			nvc0_graph_ctxctl_debug(dev);
			return -EBUSY;
		}

		priv->grctx_size = nv_rd32(dev, 0x409804);
		return 0;
	}

	/* load fuc microcode */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
	nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
	nv_wr32(dev, 0x000260, r000260);

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	return 0;
}

static int
nvc0_graph_init(struct drm_device *dev, int engine)
{
	int ret;

	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	nvc0_graph_init_obj418880(dev);
	nvc0_graph_init_regs(dev);
	/*nvc0_graph_init_unitplemented_magics(dev);*/
	nvc0_graph_init_gpc_0(dev);
	/*nvc0_graph_init_unitplemented_c242(dev);*/

	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nvc0_graph_init_units(dev);
	nvc0_graph_init_gpc_1(dev);
	nvc0_graph_init_rop(dev);

	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	ret = nvc0_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	return 0;
}

static int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}

static void
nvc0_graph_ctxctl_isr(struct drm_device *dev)
{
	u32 ustat = nv_rd32(dev, 0x409c18);

	if (ustat & 0x00000001)
		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)
		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);

	nvc0_graph_ctxctl_debug(dev);
	nv_wr32(dev, 0x409c20, ustat);
}

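/*
 * Main PGRAPH interrupt handler: decode the trapping channel, subchannel,
 * class, method and data, report ILLEGAL_MTHD/ILLEGAL_CLASS/DATA_ERROR/
 * TRAP and ctxctl events, and acknowledge each handled bit in 0x400100.
 */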
static void
nvc0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	if (stat & 0x00000010) {
		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		u32 trap = nv_rd32(dev, 0x400108);
		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
		nv_wr32(dev, 0x400108, trap);
		nv_wr32(dev, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		nvc0_graph_ctxctl_isr(dev);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	nv_wr32(dev, 0x400500, 0x00010001);
}

static void
nvc0_runk140_isr(struct drm_device *dev)
{
	u32 units = nv_rd32(dev, 0x00017c) & 0x1f;

	while (units) {
		u32 unit = ffs(units) - 1;
		u32 reg = 0x140000 + unit * 0x2000;
		u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
		u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);

		NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
		units &= ~(1 << unit);
	}
}

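/*
 * Fetch one external ctxsw firmware image: try the chipset-specific name
 * ("nouveau/nvXX_<name>") first and fall back to the generic
 * "nouveau/<name>" before giving up.
 */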
static int
nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
		     struct nvc0_graph_fuc *fuc)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	const struct firmware *fw;
	char f[32];
	int ret;

	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
	ret = request_firmware(&fw, f, &dev->pdev->dev);
	if (ret) {
		snprintf(f, sizeof(f), "nouveau/%s", fwname);
		ret = request_firmware(&fw, f, &dev->pdev->dev);
		if (ret) {
			NV_ERROR(dev, "failed to load %s\n", fwname);
			return ret;
		}
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}

static void
nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
{
	kfree(fuc->data);
	fuc->data = NULL;
}

static void
nvc0_graph_destroy(struct drm_device *dev, int engine)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, engine);

	if (nouveau_ctxfw) {
		nvc0_graph_destroy_fw(&priv->fuc409c);
		nvc0_graph_destroy_fw(&priv->fuc409d);
		nvc0_graph_destroy_fw(&priv->fuc41ac);
		nvc0_graph_destroy_fw(&priv->fuc41ad);
	}

	nouveau_irq_unregister(dev, 12);
	nouveau_irq_unregister(dev, 25);

	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);

	if (priv->grctx_vals)
		kfree(priv->grctx_vals);

	NVOBJ_ENGINE_DEL(dev, GR);
	kfree(priv);
}

int
nvc0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv;
	int ret, gpc, i;
	u32 fermi;

	fermi = nvc0_graph_class(dev);
	if (!fermi) {
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.destroy = nvc0_graph_destroy;
	priv->base.init = nvc0_graph_init;
	priv->base.fini = nvc0_graph_fini;
	priv->base.context_new = nvc0_graph_context_new;
	priv->base.context_del = nvc0_graph_context_del;
	priv->base.object_new = nvc0_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
	nouveau_irq_register(dev, 12, nvc0_graph_isr);
	nouveau_irq_register(dev, 25, nvc0_runk140_isr);

	if (nouveau_ctxfw) {
		NV_INFO(dev, "PGRAPH: using external firmware\n");
		if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
		    nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
		    nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
		    nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
			ret = 0;
			goto error;
		}
	}

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tp_total += priv->tp_nr[gpc];
	}

	/*XXX: these need figuring out... */
	switch (dev_priv->chipset) {
	case 0xc0:
		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
		} else
		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
		} else
		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		priv->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x03;
		break;
	}

	if (!priv->magic_not_rop_nr) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
			 priv->tp_nr[3], priv->rop_nr);
		/* use 0xc3's values... */
		priv->magic_not_rop_nr = 0x03;
	}

	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
	NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
	if (fermi >= 0x9197)
		NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
	if (fermi >= 0x9297)
		NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
	return 0;

error:
	nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
	return ret;
}

MODULE_FIRMWARE("nouveau/nvc0_fuc409c");
MODULE_FIRMWARE("nouveau/nvc0_fuc409d");
MODULE_FIRMWARE("nouveau/nvc0_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc0_fuc41ad");
MODULE_FIRMWARE("nouveau/nvc3_fuc409c");
MODULE_FIRMWARE("nouveau/nvc3_fuc409d");
MODULE_FIRMWARE("nouveau/nvc3_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc3_fuc41ad");
MODULE_FIRMWARE("nouveau/nvc4_fuc409c");
MODULE_FIRMWARE("nouveau/nvc4_fuc409d");
MODULE_FIRMWARE("nouveau/nvc4_fuc41ac");
MODULE_FIRMWARE("nouveau/nvc4_fuc41ad");
MODULE_FIRMWARE("nouveau/fuc409c");
MODULE_FIRMWARE("nouveau/fuc409d");
MODULE_FIRMWARE("nouveau/fuc41ac");
MODULE_FIRMWARE("nouveau/fuc41ad");