/*
 * Copyright 2007 Stephane Marchesin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "priv.h"
#include "regs.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>

static u32
nv04_gr_ctx_regs[] = {
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x0c,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x1c,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x2c,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x3c,
	NV04_PGRAPH_PATT_COLORRAM+0x40,
	NV04_PGRAPH_PATT_COLORRAM+0x44,
	NV04_PGRAPH_PATT_COLORRAM+0x48,
	NV04_PGRAPH_PATT_COLORRAM+0x4c,
	NV04_PGRAPH_PATT_COLORRAM+0x50,
	NV04_PGRAPH_PATT_COLORRAM+0x54,
	NV04_PGRAPH_PATT_COLORRAM+0x58,
	NV04_PGRAPH_PATT_COLORRAM+0x5c,
	NV04_PGRAPH_PATT_COLORRAM+0x60,
	NV04_PGRAPH_PATT_COLORRAM+0x64,
	NV04_PGRAPH_PATT_COLORRAM+0x68,
	NV04_PGRAPH_PATT_COLORRAM+0x6c,
	NV04_PGRAPH_PATT_COLORRAM+0x70,
	NV04_PGRAPH_PATT_COLORRAM+0x74,
	NV04_PGRAPH_PATT_COLORRAM+0x78,
	NV04_PGRAPH_PATT_COLORRAM+0x7c,
	NV04_PGRAPH_PATT_COLORRAM+0x80,
	NV04_PGRAPH_PATT_COLORRAM+0x84,
	NV04_PGRAPH_PATT_COLORRAM+0x88,
	NV04_PGRAPH_PATT_COLORRAM+0x8c,
	NV04_PGRAPH_PATT_COLORRAM+0x90,
	NV04_PGRAPH_PATT_COLORRAM+0x94,
	NV04_PGRAPH_PATT_COLORRAM+0x98,
	NV04_PGRAPH_PATT_COLORRAM+0x9c,
	NV04_PGRAPH_PATT_COLORRAM+0xa0,
	NV04_PGRAPH_PATT_COLORRAM+0xa4,
	NV04_PGRAPH_PATT_COLORRAM+0xa8,
	NV04_PGRAPH_PATT_COLORRAM+0xac,
	NV04_PGRAPH_PATT_COLORRAM+0xb0,
	NV04_PGRAPH_PATT_COLORRAM+0xb4,
	NV04_PGRAPH_PATT_COLORRAM+0xb8,
	NV04_PGRAPH_PATT_COLORRAM+0xbc,
	NV04_PGRAPH_PATT_COLORRAM+0xc0,
	NV04_PGRAPH_PATT_COLORRAM+0xc4,
	NV04_PGRAPH_PATT_COLORRAM+0xc8,
	NV04_PGRAPH_PATT_COLORRAM+0xcc,
	NV04_PGRAPH_PATT_COLORRAM+0xd0,
	NV04_PGRAPH_PATT_COLORRAM+0xd4,
	NV04_PGRAPH_PATT_COLORRAM+0xd8,
	NV04_PGRAPH_PATT_COLORRAM+0xdc,
	NV04_PGRAPH_PATT_COLORRAM+0xe0,
	NV04_PGRAPH_PATT_COLORRAM+0xe4,
	NV04_PGRAPH_PATT_COLORRAM+0xe8,
	NV04_PGRAPH_PATT_COLORRAM+0xec,
	NV04_PGRAPH_PATT_COLORRAM+0xf0,
	NV04_PGRAPH_PATT_COLORRAM+0xf4,
	NV04_PGRAPH_PATT_COLORRAM+0xf8,
	NV04_PGRAPH_PATT_COLORRAM+0xfc,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
};

#define nv04_gr(p) container_of((p), struct nv04_gr, base)

struct nv04_gr {
	struct nvkm_gr base;
	struct nv04_gr_chan *chan[16];
	spinlock_t lock;
};

#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)

struct nv04_gr_chan {
	struct nvkm_object object;
	struct nv04_gr *gr;
	int chid;
	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};

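/* gr->chan[] is indexed by FIFO channel id and guarded by gr->lock;
 * chan->nv04[] is a software shadow of every register listed in
 * nv04_gr_ctx_regs[], saved and restored whole by the context switch
 * code further down.
 */
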
/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

/*
 * Software methods, why they are needed, and how they all work:
 *
 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
 * 3 words long on both. grobj format on NV04 is:
 *
 * word 0:
 * - bits 0-7: class
 * - bit 12: color key active
 * - bit 13: clip rect active
 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *           [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *           from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *           NV03_CONTEXT_SURFACE_DST].
 * - bits 15-17: 2d operation [aka patch config]
 * - bit 24: patch valid [enables rendering using this object]
 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
 *
 * word 1:
 * - bits 0-1: mono format
 * - bits 8-13: color format
 * - bits 16-31: DMA_NOTIFY instance
 *
 * word 2:
 * - bits 0-15: DMA_A instance
 * - bits 16-31: DMA_B instance
 *
 * grobj format on NV05 is:
 *
 * word 0:
 * - bits 0-7: class
 * - bit 12: color key active
 * - bit 13: clip rect active
 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
 *           [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
 *           from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
 *           NV03_CONTEXT_SURFACE_DST].
 * - bits 15-17: 2d operation [aka patch config]
 * - bits 20-22: dither mode
 * - bit 24: patch valid [enables rendering using this object]
 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
 * - bit 26: surface_src/surface_zeta valid
 * - bit 27: pattern valid
 * - bit 28: rop valid
 * - bit 29: beta1 valid
 * - bit 30: beta4 valid
 *
 * word 1:
 * - bits 0-1: mono format
 * - bits 8-13: color format
 * - bits 16-31: DMA_NOTIFY instance
 *
 * word 2:
 * - bits 0-15: DMA_A instance
 * - bits 16-31: DMA_B instance
 *
 * NV05 will set/unset the relevant valid bits when you poke the relevant
 * object-binding methods with an object of the proper type, or with the
 * NULL type. It'll only allow rendering using the grobj if all needed
 * objects are bound. The needed set of objects depends on the selected
 * operation: for example, the rop object is needed by ROP_AND, but not by
 * SRCCOPY_AND.
 *
 * NV04 doesn't have these methods implemented at all, and doesn't have the
 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
 * is set. So we have to emulate them in software, internally keeping the
 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
 * but the last word isn't actually used for anything, we abuse it for this
 * purpose.
 *
 * Actually, NV05 can optionally check bit 24 too, but we disable this since
 * there's no use for it.
 *
 * For unknown reasons, NV04 implements surf3d binding in hardware as an
 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
 * methods on the surf3d object, so we have to emulate them too.
 */

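/* Worked example (illustrative value, not taken from real hardware state):
 * a grobj word 0 of 0x0100805f decodes, per the layout above, to class 0x5f
 * (nv04 blit), 2d operation 1 (bits 15-17, i.e. ROP_AND) and patch valid
 * (bit 24) set -- so the object may render, and under the emulated NV05
 * semantics it would additionally need the pattern and rop valid bits
 * (27 and 28) before bit 24 is allowed to stay set.
 */
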
static void
nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp  = nvkm_rd32(device, 0x700000 + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x700000 + inst, tmp);

	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
}

static void
nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
{
	int class, op, valid = 1;
	u32 tmp, ctx1;

	ctx1 = nvkm_rd32(device, 0x700000 + inst);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;

	tmp = nvkm_rd32(device, 0x70000c + inst);
	tmp &= ~mask;
	tmp |= value;
	nvkm_wr32(device, 0x70000c + inst, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
}

static bool
nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
{
	u8 class = nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
	if (data > 5)
		return false;
	/* Old versions of the objects only accept first three operations. */
	if (data > 2 && class < 0x40)
		return false;
	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
	/* changing operation changes set of objects needed for validation */
	nv04_gr_set_ctx_val(device, inst, 0, 0);
	return true;
}

static bool
nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x40053c, min);
	nvkm_wr32(device, 0x400544, max);
	return true;
}

static bool
nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
{
	u32 min = data & 0xffff, max;
	u32 w = data >> 16;
	if (min & 0x8000)
		/* too large */
		return false;
	if (w & 0x8000)
		/* yes, it accepts negative for some reason. */
		w |= 0xffff0000;
	max = min + w;
	max &= 0x3ffff;
	nvkm_wr32(device, 0x400540, min);
	nvkm_wr32(device, 0x400548, max);
	return true;
}

static u32
nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
{
	return nvkm_rd32(device, 0x700000 + (inst << 4));
}

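/* Example (illustrative handle value): a bind method whose data is 0x1234
 * names the grobj at instance-memory offset 0x700000 + (0x1234 << 4); the
 * low byte of the word read back is the bound object's class, e.g. 0x30
 * for NULL or 0x43 for a rop object, which is what the bind handlers
 * below switch on.
 */
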
static bool
nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x42:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	case 0x52:
		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x18:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
		return true;
	case 0x44:
		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
		return true;
	case 0x43:
		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
		return true;
	case 0x12:
		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
		return true;
	case 0x72:
		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x58:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x59:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
		return true;
	case 0x5a:
		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
		return true;
	}
	return false;
}

static bool
nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
		return true;
	case 0x5b:
		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
		return true;
	case 0x19:
		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
		return true;
	}
	return false;
}

static bool
nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
{
	switch (nv04_gr_mthd_bind_class(device, data)) {
	case 0x30:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
		return true;
	/* Yes, for some reason even the old versions of objects
	 * accept 0x57 and not 0x17. Consistency be damned.
	 */
	case 0x57:
		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
		return true;
	}
	return false;
}

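/* Method dispatchers: one per object class (and per chip revision of the
 * class), each mapping a trapped method offset to the bind or operation
 * handler above.  Methods not listed fall through to "return false",
 * which leaves the ILLEGAL_MTHD interrupt to be reported by
 * nv04_gr_intr().
 */
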
static bool
nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x0304: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32);
	switch (mthd) {
	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
	default:
		return false;
	}
	return func(device, inst, data);
}

static bool
nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
{
	bool (*func)(struct nvkm_device *, u32, u32, u32);
	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
	case 0x1c ... 0x1e:
		   func = nv01_gr_mthd_prim; break;
	case 0x1f: func = nv01_gr_mthd_blit; break;
	case 0x21: func = nv01_gr_mthd_ifc; break;
	case 0x36: func = nv03_gr_mthd_sifc; break;
	case 0x37: func = nv03_gr_mthd_sifm; break;
	case 0x48: func = nv03_gr_mthd_ttri; break;
	case 0x4a: func = nv04_gr_mthd_gdi; break;
	case 0x4b: func = nv03_gr_mthd_gdi; break;
	case 0x53: func = nv04_gr_mthd_surf3d; break;
	case 0x5c ... 0x5e:
		   func = nv04_gr_mthd_prim; break;
	case 0x5f: func = nv04_gr_mthd_blit; break;
	case 0x60: func = nv04_gr_mthd_iifc; break;
	case 0x61: func = nv04_gr_mthd_ifc; break;
	case 0x76: func = nv04_gr_mthd_sifc; break;
	case 0x77: func = nv04_gr_mthd_sifm; break;
	default:
		return false;
	}
	return func(device, inst, mthd, data);
}

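/* End-to-end flow of the software-method emulation: when a method isn't
 * implemented in hardware, PGRAPH raises a NOTIFY interrupt with the
 * ILLEGAL_MTHD nsource bit, nv04_gr_intr() calls nv04_gr_mthd() with the
 * trapped object instance, method and data, and the dispatchers above
 * route it first by object class, then by method.  A "true" return
 * squelches the error report; "false" lets it be logged.
 */
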
static int
nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000);
#endif
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}

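/* The 16-byte object created here is the grobj described in the big
 * comment above: word 0 carries the class (plus the big-endian flag where
 * applicable), and the otherwise-unused fourth word (offset 0x0c) is the
 * one the software methods borrow for NV05-style validity tracking.
 */
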
const struct nvkm_object_func
nv04_gr_object = {
	.bind = nv04_gr_object_bind,
};

/*******************************************************************************
 * Graphics context
 ******************************************************************************/

static struct nv04_gr_chan *
nv04_gr_channel(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *chan = NULL;
	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
		if (chid < ARRAY_SIZE(gr->chan))
			chan = gr->chan[chid];
	}
	return chan;
}

static int
nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
	return 0;
}

static int
nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);

	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
	return 0;
}

static void
nv04_gr_context_switch(struct nv04_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nv04_gr_chan *prev = NULL;
	struct nv04_gr_chan *next = NULL;
	int chid;

	nv04_gr_idle(&gr->base);

	/* If previous context is valid, we need to save it */
	prev = nv04_gr_channel(gr);
	if (prev)
		nv04_gr_unload_context(prev);

	/* load context for next channel */
	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
	next = gr->chan[chid];
	if (next)
		nv04_gr_load_context(next, chid);
}

static u32 *
ctx_reg(struct nv04_gr_chan *chan, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++) {
		if (nv04_gr_ctx_regs[i] == reg)
			return &chan->nv04[i];
	}

	return NULL;
}

static void *
nv04_gr_chan_dtor(struct nvkm_object *object)
{
	struct nv04_gr_chan *chan = nv04_gr_chan(object);
	struct nv04_gr *gr = chan->gr;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&gr->lock, flags);
	return chan;
}

static int
nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_gr_chan *chan = nv04_gr_chan(object);
	struct nv04_gr *gr = chan->gr;
	struct nvkm_device *device = gr->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv04_gr_channel(gr) == chan)
		nv04_gr_unload_context(chan);
	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

static const struct nvkm_object_func
nv04_gr_chan = {
	.dtor = nv04_gr_chan_dtor,
	.fini = nv04_gr_chan_fini,
};

static int
nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nv04_gr_chan *chan;
	unsigned long flags;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;

	spin_lock_irqsave(&gr->lock, flags);
	gr->chan[chan->chid] = chan;
	spin_unlock_irqrestore(&gr->lock, flags);
	return 0;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

bool
nv04_gr_idle(struct nvkm_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = 0xffffffff;

	if (device->card_type == NV_40)
		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
			break;
	) < 0) {
		nvkm_error(subdev, "idle timed out with status %08x\n",
			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}

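/* The tables below feed nvkm_snprintbf(), which formats the raw interrupt,
 * nsource and nstatus bitmasks into readable strings (e.g. "NOTIFY") for
 * the error message printed by nv04_gr_intr().
 */
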
static const struct nvkm_bitfield
nv04_gr_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{}
};

static const struct nvkm_bitfield
nv04_gr_nstatus[] = {
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};

const struct nvkm_bitfield
nv04_gr_nsource[] = {
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
	{}
};

static void
nv04_gr_intr(struct nvkm_gr *base)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x0f000000) >> 24;
	u32 subc = (addr & 0x0000e000) >> 13;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
	u32 show = stat;
	char msg[128], src[128], sta[128];
	struct nv04_gr_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&gr->lock, flags);
	chan = gr->chan[chid];

	if (stat & NV_PGRAPH_INTR_NOTIFY) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			if (!nv04_gr_mthd(device, inst, mthd, data))
				show &= ~NV_PGRAPH_INTR_NOTIFY;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv04_gr_context_switch(gr);
	}

	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
				   "nstatus %08x [%s] ch %d [%s] subc %d "
				   "class %04x mthd %04x data %08x\n",
			   show, msg, nsource, src, nstatus, sta, chid,
			   chan ? chan->object.client->name : "unknown",
			   subc, class, mthd, data);
	}

	spin_unlock_irqrestore(&gr->lock, flags);
}

static int
nv04_gr_init(struct nvkm_gr *base)
{
	struct nv04_gr *gr = nv04_gr(base);
	struct nvkm_device *device = gr->base.engine.subdev.device;

	/* Enable PGRAPH interrupts */
	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	  nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob , 01 haiku*/
	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);

	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nvkm_wr32(device, NV04_PGRAPH_STATE, 0xFFFFFFFF);
	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);

	/* These don't belong here, they're part of a per-channel context */
	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nvkm_wr32(device, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
	return 0;
}

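/* Everything below wires the implementation into nvkm.  Every user class
 * in the sclass table is backed by the same nv04_gr_object bind function,
 * since on these chips all per-object state lives in the 4-word grobj
 * rather than in class-specific driver structures.
 */
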
static const struct nvkm_gr_func
nv04_gr = {
	.init = nv04_gr_init,
	.intr = nv04_gr_intr,
	.chan_new = nv04_gr_chan_new,
	.sclass = {
		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
		{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
		{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
		{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
		{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
		{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
		{ -1, -1, 0x001f, &nv04_gr_object }, /* blit (nv01) */
		{ -1, -1, 0x0021, &nv04_gr_object }, /* ifc (nv01) */
		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
		{ -1, -1, 0x0036, &nv04_gr_object }, /* sifc (nv03) */
		{ -1, -1, 0x0037, &nv04_gr_object }, /* sifm (nv03) */
		{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
		{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
		{ -1, -1, 0x0048, &nv04_gr_object }, /* ttri (nv03) */
		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi (nv04) */
		{ -1, -1, 0x004b, &nv04_gr_object }, /* gdi (nv03) */
		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
		{ -1, -1, 0x0053, &nv04_gr_object }, /* surf3d */
		{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
		{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
		{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
		{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
		{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
		{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
		{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
		{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
		{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
		{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit (nv04) */
		{ -1, -1, 0x0060, &nv04_gr_object }, /* iifc */
		{ -1, -1, 0x0061, &nv04_gr_object }, /* ifc (nv04) */
		{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
		{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
		{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
		{ -1, -1, 0x0076, &nv04_gr_object }, /* sifc (nv04) */
		{ -1, -1, 0x0077, &nv04_gr_object }, /* sifm (nv04) */
		{}
	}
};

int
nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	struct nv04_gr *gr;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&gr->lock);
	*pgr = &gr->base;

	return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
}