/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/falcon.h>
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/pmu.h>
#include <subdev/timer.h>

#include <nvfw/acr.h>
#include <nvfw/flcn.h>

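/* ACR init simply (re-)boots the "load" HS firmware, which bootstraps
 * the LS falcon firmware images held in the WPR region.
 */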
int
gm200_acr_init(struct nvkm_acr *acr)
{
        return nvkm_acr_hsf_boot(acr, "load");
}

void
gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
{
        struct nvkm_device *device = acr->subdev.device;

        nvkm_wr32(device, 0x100cd4, 2);
        *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
        nvkm_wr32(device, 0x100cd4, 3);
        *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
        *limit = *limit + 0x20000;
}

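/* Walk the WPR headers in the live WPR image and have each LS firmware's
 * ->bld_patch() adjust its bootloader descriptor by 'adjust', for use
 * when the effective WPR base address has changed.
 */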
void
gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
{
        struct nvkm_subdev *subdev = &acr->subdev;
        struct wpr_header hdr;
        struct lsb_header lsb;
        struct nvkm_acr_lsf *lsfw;
        u32 offset = 0;

        do {
                nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
                wpr_header_dump(subdev, &hdr);

                list_for_each_entry(lsfw, &acr->lsfw, head) {
                        if (lsfw->id != hdr.falcon_id)
                                continue;

                        nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
                        lsb_header_dump(subdev, &lsb);

                        lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
                        break;
                }

                offset += sizeof(hdr);
        } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
}

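/* Fill the generic tail of an LSB header from a parsed LS firmware:
 * ucode/bootloader placement within WPR, resident app code/data extents,
 * and per-firmware flags.
 */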
void
gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
                             struct lsb_header_tail *hdr)
{
        hdr->ucode_off = lsfw->offset.img;
        hdr->ucode_size = lsfw->ucode_size;
        hdr->data_size = lsfw->data_size;
        hdr->bl_code_size = lsfw->bootloader_size;
        hdr->bl_imem_off = lsfw->bootloader_imem_offset;
        hdr->bl_data_off = lsfw->offset.bld;
        hdr->bl_data_size = lsfw->bl_data_size;
        hdr->app_code_off = lsfw->app_start_offset +
                            lsfw->app_resident_code_offset;
        hdr->app_code_size = lsfw->app_resident_code_size;
        hdr->app_data_off = lsfw->app_start_offset +
                            lsfw->app_resident_data_offset;
        hdr->app_data_size = lsfw->app_resident_data_size;
        hdr->flags = lsfw->func->flags;
}

static int
gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
        struct lsb_header hdr;

        if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
                return -EINVAL;

        memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
        gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);

        nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
        return 0;
}

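/* Serialise the WPR image: a WPR header per LS firmware, each pointing
 * at an LSB header, ucode image and bootloader data written at the
 * offsets chosen by gm200_acr_wpr_layout(), then an invalid-id terminator.
 */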
int
gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
        struct nvkm_acr_lsfw *lsfw;
        u32 offset = 0;
        int ret;

        /* Fill per-LSF structures. */
        list_for_each_entry(lsfw, &acr->lsfw, head) {
                struct wpr_header hdr = {
                        .falcon_id = lsfw->id,
                        .lsb_offset = lsfw->offset.lsb,
                        .bootstrap_owner = NVKM_ACR_LSF_PMU,
                        .lazy_bootstrap = rtos && lsfw->id != rtos->id,
                        .status = WPR_HEADER_V0_STATUS_COPY,
                };

                /* Write WPR header. */
                nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
                offset += sizeof(hdr);

                /* Write LSB header. */
                ret = gm200_acr_wpr_build_lsb(acr, lsfw);
                if (ret)
                        return ret;

                /* Write ucode image. */
                nvkm_wobj(acr->wpr, lsfw->offset.img,
                          lsfw->img.data, lsfw->img.size);

                /* Write bootloader data. */
                lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
        }

        /* Finalise WPR. */
        nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
        return 0;
}

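/* Allocate backing memory for the WPR image, padded to 0x40000 bytes of
 * size and alignment (presumably the WPR granularity on these parts),
 * and record the resulting region bounds.
 */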
static int
gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
{
        int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
                                  ALIGN(wpr_size, 0x40000), 0x40000, true,
                                  &acr->wpr);
        if (ret)
                return ret;

        acr->wpr_start = nvkm_memory_addr(acr->wpr);
        acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
        return 0;
}

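/* Lay out the WPR image and return its total size: room for up to 11
 * WPR headers, then for each LS firmware an LSB header (256-byte
 * aligned), its ucode image (4096-byte aligned) and its bootloader
 * data (256-byte aligned).
 */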
u32
gm200_acr_wpr_layout(struct nvkm_acr *acr)
{
        struct nvkm_acr_lsfw *lsfw;
        u32 wpr = 0;

        wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);

        list_for_each_entry(lsfw, &acr->lsfw, head) {
                wpr  = ALIGN(wpr, 256);
                lsfw->offset.lsb = wpr;
                wpr += sizeof(struct lsb_header);

                wpr  = ALIGN(wpr, 4096);
                lsfw->offset.img = wpr;
                wpr += lsfw->img.size;

                wpr  = ALIGN(wpr, 256);
                lsfw->offset.bld = wpr;
                lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
                wpr += lsfw->bl_data_size;
        }

        return wpr;
}

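/* Parse a pre-built WPR image provided as firmware, registering an LS
 * firmware entry for every valid falcon-id it names.
 */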
int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
        const struct wpr_header *hdr = (void *)acr->wpr_fw->data;

        while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
                wpr_header_dump(&acr->subdev, hdr);
                if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
                        return -ENOMEM;
        }

        return 0;
}

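/* Describe the HS firmware layout to its embedded bootloader by writing
 * a descriptor into DMEM; addresses are VAs in the ACR VMM, accessed
 * through the falcon's virtual-memory DMA index.
 */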
void
gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
        struct flcn_bl_dmem_desc_v1 hsdesc = {
                .ctx_dma = FALCON_DMAIDX_VIRT,
                .code_dma_base = hsf->vma->addr,
                .non_sec_code_off = hsf->non_sec_addr,
                .non_sec_code_size = hsf->non_sec_size,
                .sec_code_off = hsf->sec_addr,
                .sec_code_size = hsf->sec_size,
                .code_entry_point = 0,
                .data_dma_base = hsf->vma->addr + hsf->data_addr,
                .data_size = hsf->data_size,
        };

        flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);

        nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
}

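/* Generic HS firmware boot: reset the falcon, load the bootloader at the
 * top of IMEM and its descriptor into DMEM, start execution, then wait
 * for halt and check the mailbox registers for success.
 */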
int
gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
                    u32 intr_clear, u32 mbox0_ok)
{
        struct nvkm_subdev *subdev = &acr->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_falcon *falcon = hsf->falcon;
        u32 mbox0, mbox1;
        int ret;

        /* Reset falcon. */
        nvkm_falcon_reset(falcon);
        nvkm_falcon_bind_context(falcon, acr->inst);

        /* Load bootloader into IMEM. */
        nvkm_falcon_load_imem(falcon, hsf->imem,
                              falcon->code.limit - hsf->imem_size,
                              hsf->imem_size, hsf->imem_tag, 0, false);

        /* Load bootloader data into DMEM. */
        hsf->func->bld(acr, hsf);

        /* Boot the falcon. */
        nvkm_mc_intr_mask(device, falcon->owner->index, false);

        nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
        nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
        nvkm_falcon_start(falcon);
        ret = nvkm_falcon_wait_for_halt(falcon, 100);
        if (ret)
                return ret;

        /* Check for successful completion. */
        mbox0 = nvkm_falcon_rd32(falcon, 0x040);
        mbox1 = nvkm_falcon_rd32(falcon, 0x044);
        nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
        if (mbox0 && mbox0 != mbox0_ok)
                return -EIO;

        nvkm_falcon_clear_interrupt(falcon, intr_clear);
        nvkm_mc_intr_mask(device, falcon->owner->index, true);
        return ret;
}

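/* Prepare an HS firmware for booting: patch in the signature matching
 * the falcon's current security mode, copy out the bootloader/ucode
 * parameters, and map the signed image into the ACR VMM so the
 * bootloader can DMA it.
 */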
int
gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
                    struct nvkm_falcon *falcon)
{
        struct nvkm_subdev *subdev = &acr->subdev;
        struct nvkm_acr_hsf *hsf;
        int ret;

        /* Patch the appropriate signature (production/debug) into the FW
         * image, as determined by the mode the falcon is in.
         */
        ret = nvkm_falcon_get(falcon, subdev);
        if (ret)
                return ret;

        if (hsfw->sig.patch_loc) {
                if (!falcon->debug) {
                        nvkm_debug(subdev, "patching production signature\n");
                        memcpy(hsfw->image + hsfw->sig.patch_loc,
                               hsfw->sig.prod.data,
                               hsfw->sig.prod.size);
                } else {
                        nvkm_debug(subdev, "patching debug signature\n");
                        memcpy(hsfw->image + hsfw->sig.patch_loc,
                               hsfw->sig.dbg.data,
                               hsfw->sig.dbg.size);
                }
        }

        nvkm_falcon_put(falcon, subdev);

        if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
                return -ENOMEM;
        hsf->func = hsfw->func;
        hsf->name = hsfw->name;
        list_add_tail(&hsf->head, &acr->hsf);

        hsf->imem_size = hsfw->imem_size;
        hsf->imem_tag = hsfw->imem_tag;
        hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
        if (!hsf->imem)
                return -ENOMEM;

        hsf->non_sec_addr = hsfw->non_sec_addr;
        hsf->non_sec_size = hsfw->non_sec_size;
        hsf->sec_addr = hsfw->sec_addr;
        hsf->sec_size = hsfw->sec_size;
        hsf->data_addr = hsfw->data_addr;
        hsf->data_size = hsfw->data_size;

        /* Make the FW image accessible to the HS bootloader. */
        ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
                              hsfw->image_size, 0x1000, false, &hsf->ucode);
        if (ret)
                return ret;

        nvkm_kmap(hsf->ucode);
        nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
        nvkm_done(hsf->ucode);

        ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
                           &hsf->vma);
        if (ret)
                return ret;

        ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
        if (ret)
                return ret;

        hsf->falcon = falcon;
        return 0;
}

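/* The "unload" HS firmware tears ACR down again.  It runs on the PMU
 * falcon; a mailbox-0 value of 0x1d is presumably its success code, as
 * it is accepted alongside zero.
 */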
int
gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
        return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
}

int
gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
{
        return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
}

const struct nvkm_acr_hsf_func
gm200_acr_unload_0 = {
        .load = gm200_acr_unload_load,
        .boot = gm200_acr_unload_boot,
        .bld = gm200_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
gm200_acr_unload_fwif[] = {
        { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
        {}
};

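/* The "load" HS firmware sets up the WPR region described by the ACR
 * descriptor patched in below and bootstraps the LS falcons.  Mailbox 0
 * must read back zero, and interrupt bit 0x10 is cleared after halt.
 */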
int
gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
        return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
}

static int
gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
{
        struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];

        desc->wpr_region_id = 1;
        desc->regions.no_regions = 2;
        desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
        desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
        desc->regions.region_props[0].region_id = 1;
        desc->regions.region_props[0].read_mask = 0xf;
        desc->regions.region_props[0].write_mask = 0xc;
        desc->regions.region_props[0].client_mask = 0x2;
        flcn_acr_desc_dump(&acr->subdev, desc);

        return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
}

static const struct nvkm_acr_hsf_func
gm200_acr_load_0 = {
        .load = gm200_acr_load_load,
        .boot = gm200_acr_load_boot,
        .bld = gm200_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");

static const struct nvkm_acr_hsf_fwif
gm200_acr_load_fwif[] = {
        { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
        {}
};

static const struct nvkm_acr_func
gm200_acr = {
        .load = gm200_acr_load_fwif,
        .unload = gm200_acr_unload_fwif,
        .wpr_parse = gm200_acr_wpr_parse,
        .wpr_layout = gm200_acr_wpr_layout,
        .wpr_alloc = gm200_acr_wpr_alloc,
        .wpr_build = gm200_acr_wpr_build,
        .wpr_patch = gm200_acr_wpr_patch,
        .wpr_check = gm200_acr_wpr_check,
        .init = gm200_acr_init,
};

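/* Fetch both HS firmware images up front; failure to load either the
 * "load" or "unload" image is fatal.
 */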
static int
gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
        struct nvkm_subdev *subdev = &acr->subdev;
        const struct nvkm_acr_hsf_fwif *hsfwif;

        hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
                                    acr, "acr/bl", "acr/ucode_load", "load");
        if (IS_ERR(hsfwif))
                return PTR_ERR(hsfwif);

        hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
                                    acr, "acr/bl", "acr/ucode_unload",
                                    "unload");
        if (IS_ERR(hsfwif))
                return PTR_ERR(hsfwif);

        return 0;
}

static const struct nvkm_acr_fwif
gm200_acr_fwif[] = {
        { 0, gm200_acr_load, &gm200_acr },
        {}
};

int
gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
{
        return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr);
}