/*
 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
25 #include <core/firmware.h>
26 #include <subdev/timer.h>
28 #include <nvif/class.h>
37 gk20a_gr_av_to_init(struct gf100_gr
*gr
, const char *path
, const char *name
,
38 int ver
, struct gf100_gr_pack
**ppack
)
40 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
41 struct nvkm_blob blob
;
42 struct gf100_gr_init
*init
;
43 struct gf100_gr_pack
*pack
;
48 ret
= nvkm_firmware_load_blob(subdev
, path
, name
, ver
, &blob
);
52 nent
= (blob
.size
/ sizeof(struct gk20a_fw_av
));
53 pack
= vzalloc((sizeof(*pack
) * 2) + (sizeof(*init
) * (nent
+ 1)));
59 init
= (void *)(pack
+ 2);
62 for (i
= 0; i
< nent
; i
++) {
63 struct gf100_gr_init
*ent
= &init
[i
];
64 struct gk20a_fw_av
*av
= &((struct gk20a_fw_av
*)blob
.data
)[i
];
75 nvkm_blob_dtor(&blob
);
87 gk20a_gr_aiv_to_init(struct gf100_gr
*gr
, const char *path
, const char *name
,
88 int ver
, struct gf100_gr_pack
**ppack
)
90 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
91 struct nvkm_blob blob
;
92 struct gf100_gr_init
*init
;
93 struct gf100_gr_pack
*pack
;
98 ret
= nvkm_firmware_load_blob(subdev
, path
, name
, ver
, &blob
);
102 nent
= (blob
.size
/ sizeof(struct gk20a_fw_aiv
));
103 pack
= vzalloc((sizeof(*pack
) * 2) + (sizeof(*init
) * (nent
+ 1)));
109 init
= (void *)(pack
+ 2);
112 for (i
= 0; i
< nent
; i
++) {
113 struct gf100_gr_init
*ent
= &init
[i
];
114 struct gk20a_fw_aiv
*av
= &((struct gk20a_fw_aiv
*)blob
.data
)[i
];
116 ent
->addr
= av
->addr
;
117 ent
->data
= av
->data
;
125 nvkm_blob_dtor(&blob
);
130 gk20a_gr_av_to_method(struct gf100_gr
*gr
, const char *path
, const char *name
,
131 int ver
, struct gf100_gr_pack
**ppack
)
133 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
134 struct nvkm_blob blob
;
135 struct gf100_gr_init
*init
;
136 struct gf100_gr_pack
*pack
;
137 /* We don't suppose we will initialize more than 16 classes here... */
138 static const unsigned int max_classes
= 16;
139 u32 classidx
= 0, prevclass
= 0;
144 ret
= nvkm_firmware_load_blob(subdev
, path
, name
, ver
, &blob
);
148 nent
= (blob
.size
/ sizeof(struct gk20a_fw_av
));
150 pack
= vzalloc((sizeof(*pack
) * (max_classes
+ 1)) +
151 (sizeof(*init
) * (nent
+ max_classes
+ 1)));
157 init
= (void *)(pack
+ max_classes
+ 1);
159 for (i
= 0; i
< nent
; i
++, init
++) {
160 struct gk20a_fw_av
*av
= &((struct gk20a_fw_av
*)blob
.data
)[i
];
161 u32
class = av
->addr
& 0xffff;
162 u32 addr
= (av
->addr
& 0xffff0000) >> 14;
164 if (prevclass
!= class) {
165 if (prevclass
) /* Add terminator to the method list. */
167 pack
[classidx
].init
= init
;
168 pack
[classidx
].type
= class;
170 if (++classidx
>= max_classes
) {
178 init
->data
= av
->data
;
186 nvkm_blob_dtor(&blob
);
191 gk20a_gr_wait_mem_scrubbing(struct gf100_gr
*gr
)
193 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
194 struct nvkm_device
*device
= subdev
->device
;
196 if (nvkm_msec(device
, 2000,
197 if (!(nvkm_rd32(device
, 0x40910c) & 0x00000006))
200 nvkm_error(subdev
, "FECS mem scrubbing timeout\n");
204 if (nvkm_msec(device
, 2000,
205 if (!(nvkm_rd32(device
, 0x41a10c) & 0x00000006))
208 nvkm_error(subdev
, "GPCCS mem scrubbing timeout\n");
216 gk20a_gr_set_hww_esr_report_mask(struct gf100_gr
*gr
)
218 struct nvkm_device
*device
= gr
->base
.engine
.subdev
.device
;
219 nvkm_wr32(device
, 0x419e44, 0x1ffffe);
220 nvkm_wr32(device
, 0x419e4c, 0x7f);
224 gk20a_gr_init(struct gf100_gr
*gr
)
226 struct nvkm_device
*device
= gr
->base
.engine
.subdev
.device
;
230 nvkm_wr32(device
, 0x40802c, 0x1);
232 gf100_gr_mmio(gr
, gr
->sw_nonctx
);
234 ret
= gk20a_gr_wait_mem_scrubbing(gr
);
238 ret
= gf100_gr_wait_idle(gr
);
242 /* MMU debug buffer */
243 if (gr
->func
->init_gpc_mmu
)
244 gr
->func
->init_gpc_mmu(gr
);
246 /* Set the PE as stream master */
247 nvkm_mask(device
, 0x503018, 0x1, 0x1);
250 gr
->func
->init_zcull(gr
);
252 gr
->func
->init_rop_active_fbps(gr
);
254 /* Enable FIFO access */
255 nvkm_wr32(device
, 0x400500, 0x00010001);
257 /* Enable interrupts */
258 nvkm_wr32(device
, 0x400100, 0xffffffff);
259 nvkm_wr32(device
, 0x40013c, 0xffffffff);
261 /* Enable FECS error interrupts */
262 nvkm_wr32(device
, 0x409c24, 0x000f0000);
264 /* Enable hardware warning exceptions */
265 nvkm_wr32(device
, 0x404000, 0xc0000000);
266 nvkm_wr32(device
, 0x404600, 0xc0000000);
268 if (gr
->func
->set_hww_esr_report_mask
)
269 gr
->func
->set_hww_esr_report_mask(gr
);
271 /* Enable TPC exceptions per GPC */
272 nvkm_wr32(device
, 0x419d0c, 0x2);
273 nvkm_wr32(device
, 0x41ac94, (((1 << gr
->tpc_total
) - 1) & 0xff) << 16);
275 /* Reset and enable all exceptions */
276 nvkm_wr32(device
, 0x400108, 0xffffffff);
277 nvkm_wr32(device
, 0x400138, 0xffffffff);
278 nvkm_wr32(device
, 0x400118, 0xffffffff);
279 nvkm_wr32(device
, 0x400130, 0xffffffff);
280 nvkm_wr32(device
, 0x40011c, 0xffffffff);
281 nvkm_wr32(device
, 0x400134, 0xffffffff);
283 gf100_gr_zbc_init(gr
);
285 return gf100_gr_init_ctxctl(gr
);
288 static const struct gf100_gr_func
290 .oneinit_tiles
= gf100_gr_oneinit_tiles
,
291 .oneinit_sm_id
= gf100_gr_oneinit_sm_id
,
292 .init
= gk20a_gr_init
,
293 .init_zcull
= gf117_gr_init_zcull
,
294 .init_rop_active_fbps
= gk104_gr_init_rop_active_fbps
,
295 .trap_mp
= gf100_gr_trap_mp
,
296 .set_hww_esr_report_mask
= gk20a_gr_set_hww_esr_report_mask
,
297 .rops
= gf100_gr_rops
,
299 .grctx
= &gk20a_grctx
,
300 .zbc
= &gf100_gr_zbc
,
302 { -1, -1, FERMI_TWOD_A
},
303 { -1, -1, KEPLER_INLINE_TO_MEMORY_A
},
304 { -1, -1, KEPLER_C
, &gf100_fermi
},
305 { -1, -1, KEPLER_COMPUTE_A
},
311 gk20a_gr_load_sw(struct gf100_gr
*gr
, const char *path
, int ver
)
313 if (gk20a_gr_av_to_init(gr
, path
, "sw_nonctx", ver
, &gr
->sw_nonctx
) ||
314 gk20a_gr_aiv_to_init(gr
, path
, "sw_ctx", ver
, &gr
->sw_ctx
) ||
315 gk20a_gr_av_to_init(gr
, path
, "sw_bundle_init", ver
, &gr
->bundle
) ||
316 gk20a_gr_av_to_method(gr
, path
, "sw_method_init", ver
, &gr
->method
))
322 #if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
323 MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
324 MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
325 MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
326 MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
327 MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
328 MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
329 MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
330 MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
334 gk20a_gr_load(struct gf100_gr
*gr
, int ver
, const struct gf100_gr_fwif
*fwif
)
336 struct nvkm_subdev
*subdev
= &gr
->base
.engine
.subdev
;
338 if (nvkm_firmware_load_blob(subdev
, "", "fecs_inst", ver
,
340 nvkm_firmware_load_blob(subdev
, "", "fecs_data", ver
,
342 nvkm_firmware_load_blob(subdev
, "", "gpccs_inst", ver
,
344 nvkm_firmware_load_blob(subdev
, "", "gpccs_data", ver
,
350 return gk20a_gr_load_sw(gr
, "", ver
);
353 static const struct gf100_gr_fwif
355 { 0, gk20a_gr_load
, &gk20a_gr
},
360 gk20a_gr_new(struct nvkm_device
*device
, int index
, struct nvkm_gr
**pgr
)
362 return gf100_gr_new_(gk20a_gr_fwif
, device
, index
, pgr
);