/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/secboot.h>

#include <nvif/class.h>

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

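/*
 * On GM200 the ROP count is taken from the same register that reports
 * the number of FBPs (0x12006c).
 */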
int
gm200_gr_rops(struct gf100_gr *gr)
{
	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

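/*
 * Mirror the VRAM/MMU configuration published by PFB (0x100xxx) into
 * PGRAPH's GPC MMU broadcast registers (0x418xxx).
 */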
static void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

static void
gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}

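/*
 * Top-level PGRAPH init: program the GPC MMU, upload the non-context
 * register lists, distribute TPCs across GPCs, enable exception reporting
 * for the GPC/TPC/ROP units, and hand over to context control (ctxctl).
 */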
int
gm200_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
	u32 data[TPC_MAX / 8] = {};
	u8  tpcnr[GPC_MAX];
	int gpc, tpc, rop;
	int i;

	gr->func->init_gpc_mmu(gr);

	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);

	gm107_gr_init_bios(gr);

	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);

	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
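	/* Distribute TPCs round-robin across GPCs, one nibble per TPC. */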
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
			  gr->tpc_total);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));

	gr->func->init_rop_active_fbps(gr);

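	/* Clear pending interrupts and enable exception reporting. */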
	nvkm_wr32(device, 0x400500, 0x00010001);
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);
	nvkm_wr32(device, 0x400124, 0x00000002);
	nvkm_wr32(device, 0x409c24, 0x000e0000);
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_wr32(device, 0x40584c, 0x00000001);
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);
	nvkm_wr32(device, 0x408030, 0xc0000000);
	nvkm_wr32(device, 0x404490, 0xc0000000);
	nvkm_wr32(device, 0x406018, 0xc0000000);
	nvkm_wr32(device, 0x407020, 0x40000000);
	nvkm_wr32(device, 0x405840, 0xc0000000);
	nvkm_wr32(device, 0x405844, 0x00ffffff);
	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);

	gr->func->init_ppc_exceptions(gr);

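	/* Enable exception reporting for each GPC and its TPCs. */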
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
		}
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}

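	/* Enable interrupt/exception reporting in each ROP unit. */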
	for (rop = 0; rop < gr->rop_nr; rop++) {
		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
	}

	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	nvkm_wr32(device, 0x400054, 0x2c350f63);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}

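/*
 * Common constructor for GM200-family GR: allocate the engine object,
 * run the gf100 constructor, then load external firmware images for any
 * falcons that are not managed by the secure boot (secboot) subdev.
 */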
int
gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
	      int index, struct nvkm_gr **pgr)
{
	struct gf100_gr *gr;
	int ret;

	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
		return -ENOMEM;
	*pgr = &gr->base;

	ret = gf100_gr_ctor(func, device, index, gr);
	if (ret)
		return ret;

	/* Load firmwares for non-secure falcons */
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_FECS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
			return ret;
	}
	if (!nvkm_secboot_is_managed(device->secboot,
				     NVKM_SECBOOT_FALCON_GPCCS)) {
		if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
		    (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
			return ret;
	}

	if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
	    (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
	    (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
	    (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
		return ret;

	return 0;
}

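/* GM200 PGRAPH: per-chipset hooks and supported object classes. */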
static const struct gf100_gr_func
gm200_gr = {
	.init = gm200_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.rops = gm200_gr_rops,
	.grctx = &gm200_grctx,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

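/* Probe entry point: create the GM200 GR engine from the table above. */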
int
gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gm200_gr_new_(&gm200_gr, device, index, pgr);
}