/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
#include "nouveau_ramht.h"

struct nv40_graph_engine {
	struct nouveau_exec_engine base;
	u32 grctx_size;
};
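
/* Per-channel PGRAPH context management.  Contexts are plain gpuobjs
 * filled with the default values produced by the nv40_grctx generator.
 */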
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *grctx = NULL;
	struct nouveau_grctx ctx = {};
	unsigned long flags;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv40_grctx_init(&ctx);

	nv_wo32(grctx, 0, grctx->vinst);

	/* init grctx pointer in ramfc, and on PFIFO if channel is
	 * already active there
	 */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	chan->engctx[engine] = grctx;
	return 0;
}
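
/* Tear down a channel's grctx: clear the PGRAPH context pointers at
 * 0x40032c/0x400330 if they still reference this context, then release
 * the object.
 */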
static void
nv40_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 inst = 0x01000000 | (grctx->pinst >> 4);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
	if (nv_rd32(dev, 0x40032c) == inst)
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
	if (nv_rd32(dev, 0x400330) == inst)
		nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
	nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}
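
/* Build the instance object describing a graphics class and hash it
 * into the channel's RAMHT under the given handle.
 */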
static int
nv40_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class  = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
#ifndef __BIG_ENDIAN
	nv_wo32(obj, 0x08, 0x00000000);
#else
	nv_wo32(obj, 0x08, 0x01000000);
#endif
	nv_wo32(obj, 0x0c, 0x00000000);
	nv_wo32(obj, 0x10, 0x00000000);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
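
/* Program one tile region into PGRAPH; which register block is used
 * (and whether a second copy is needed) depends on the chipset.
 */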
static void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41: /* guess */
	case 0x42:
	case 0x43:
	case 0x45: /* guess */
	case 0x4e:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	case 0x44:
	case 0x4a:
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
	case 0x67:
	default:
		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}
}
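
/* Engine init: reset PGRAPH via PMC, generate and upload the ctxprog
 * microcode, then program debug, interrupt, tiling and RAM config state.
 */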
static int
nv40_graph_init(struct drm_device *dev, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_grctx ctx = {};
	uint32_t vramsz, *cp;
	int i, j;

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 256;
	nv40_grctx_init(&ctx);
	pgraph->grctx_size = ctx.ctxvals_pos * 4;

	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < ctx.ctxprog_len; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);

	kfree(cp);

	/* No context present currently */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	j = nv_rd32(dev, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(dev, 0x405000, i);
	}

	if (dev_priv->chipset == 0x40) {
		nv_wr32(dev, 0x4009b0, 0x83280fff);
		nv_wr32(dev, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(dev, 0x400820, 0x83280eff);
		nv_wr32(dev, 0x400824, 0x000000a0);
	}

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(dev, 0x4009b8, 0x0078e366);
		nv_wr32(dev, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(dev, 0x400828, 0x007596ff);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(dev, 0x400828, 0x0072cb77);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(dev, 0x400860, 0);
		nv_wr32(dev, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(dev, 0x400828, 0x07830610);
		nv_wr32(dev, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(dev, 0x400b38, 0x2ffff800);
	nv_wr32(dev, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(dev, 0x400bc4, 0x0000e024);
		nv_wr32(dev, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		nv40_graph_set_tile_region(dev, i);

	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400820, 0);
		nv_wr32(dev, 0x400824, 0);
		nv_wr32(dev, 0x400864, vramsz);
		nv_wr32(dev, 0x400868, vramsz);
		break;
	default:
		switch (dev_priv->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		default:
			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		}
		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400840, 0);
		nv_wr32(dev, 0x400844, 0);
		nv_wr32(dev, 0x4008A0, vramsz);
		nv_wr32(dev, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
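
/* Idle the engine: if a context is current, force a context save and
 * wait for the ctxprog to go idle before it is unloaded.
 */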
static int
nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	u32 inst = nv_rd32(dev, 0x40032c);
	if (inst & 0x01000000) {
		nv_wr32(dev, 0x400720, 0x00000000);
		nv_wr32(dev, 0x400784, inst);
		nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
		nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
		if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
			u32 insn = nv_rd32(dev, 0x400308);
			NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
		}
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
	}
	return 0;
}
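
/* Map a PGRAPH context instance address back to the channel that owns
 * it by scanning the channel table.
 */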
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *grctx;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		if (!dev_priv->channels.ptr[i])
			continue;
		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];

		if (grctx && grctx->pinst == inst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
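
/* PGRAPH interrupt handler: decode the trapped channel/class/method,
 * give software methods a chance to handle ILLEGAL_MTHD, then ack the
 * interrupt and (rate-limited) log whatever remains unhandled.
 */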
static void
nv40_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
		u32 chid = nv40_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			} else
			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
				nv_mask(dev, 0x402000, 0, 0);
			}
		}

		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
	}
}
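
/* Unregister the IRQ handler and remove the engine on teardown. */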
static void
nv40_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 12);

	NVOBJ_ENGINE_DEL(dev, GR);
	kfree(pgraph);
}
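
/* Create the PGRAPH engine object, hook up the exec-engine methods,
 * and register the object classes exposed on NV40-family hardware.
 */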
int
nv40_graph_create(struct drm_device *dev)
{
	struct nv40_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv40_graph_destroy;
	pgraph->base.init = nv40_graph_init;
	pgraph->base.fini = nv40_graph_fini;
	pgraph->base.context_new = nv40_graph_context_new;
	pgraph->base.context_del = nv40_graph_context_del;
	pgraph->base.object_new = nv40_graph_object_new;
	pgraph->base.set_tile_region = nv40_graph_set_tile_region;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv40_graph_isr);

	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */

	/* curie */
	if (nv44_graph_class(dev))
		NVOBJ_CLASS(dev, 0x4497, GR);
	else
		NVOBJ_CLASS(dev, 0x4097, GR);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
	return 0;
}
);