/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>
28 static struct nvkm_acr_hsf
*
29 nvkm_acr_hsf_find(struct nvkm_acr
*acr
, const char *name
)
31 struct nvkm_acr_hsf
*hsf
;
32 list_for_each_entry(hsf
, &acr
->hsf
, head
) {
33 if (!strcmp(hsf
->name
, name
))
40 nvkm_acr_hsf_boot(struct nvkm_acr
*acr
, const char *name
)
42 struct nvkm_subdev
*subdev
= &acr
->subdev
;
43 struct nvkm_acr_hsf
*hsf
;
46 hsf
= nvkm_acr_hsf_find(acr
, name
);
50 nvkm_debug(subdev
, "executing %s binary\n", hsf
->name
);
51 ret
= nvkm_falcon_get(hsf
->falcon
, subdev
);
55 ret
= hsf
->func
->boot(acr
, hsf
);
56 nvkm_falcon_put(hsf
->falcon
, subdev
);
58 nvkm_error(subdev
, "%s binary failed\n", hsf
->name
);
62 nvkm_debug(subdev
, "%s binary completed successfully\n", hsf
->name
);
67 nvkm_acr_unload(struct nvkm_acr
*acr
)
70 nvkm_acr_hsf_boot(acr
, "unload");
76 nvkm_acr_load(struct nvkm_acr
*acr
)
78 struct nvkm_subdev
*subdev
= &acr
->subdev
;
79 struct nvkm_acr_lsf
*lsf
;
83 if (list_empty(&acr
->lsf
)) {
84 nvkm_debug(subdev
, "No LSF(s) present.\n");
88 ret
= acr
->func
->init(acr
);
92 acr
->func
->wpr_check(acr
, &start
, &limit
);
94 if (start
!= acr
->wpr_start
|| limit
!= acr
->wpr_end
) {
95 nvkm_error(subdev
, "WPR not configured as expected: "
96 "%016llx-%016llx vs %016llx-%016llx\n",
97 acr
->wpr_start
, acr
->wpr_end
, start
, limit
);
103 list_for_each_entry(lsf
, &acr
->lsf
, head
) {
104 if (lsf
->func
->boot
) {
105 ret
= lsf
->func
->boot(lsf
->falcon
);
/* Full ACR cycle: unload (if loaded), then load again. */
static int
nvkm_acr_reload(struct nvkm_acr *acr)
{
	nvkm_acr_unload(acr);
	return nvkm_acr_load(acr);
}
121 static struct nvkm_acr_lsf
*
122 nvkm_acr_falcon(struct nvkm_device
*device
)
124 struct nvkm_acr
*acr
= device
->acr
;
125 struct nvkm_acr_lsf
*lsf
;
128 list_for_each_entry(lsf
, &acr
->lsf
, head
) {
129 if (lsf
->func
->bootstrap_falcon
)
138 nvkm_acr_bootstrap_falcons(struct nvkm_device
*device
, unsigned long mask
)
140 struct nvkm_acr_lsf
*acrflcn
= nvkm_acr_falcon(device
);
141 struct nvkm_acr
*acr
= device
->acr
;
145 int ret
= nvkm_acr_reload(acr
);
149 return acr
->done
? 0 : -EINVAL
;
152 if (acrflcn
->func
->bootstrap_multiple_falcons
) {
153 return acrflcn
->func
->
154 bootstrap_multiple_falcons(acrflcn
->falcon
, mask
);
157 for_each_set_bit(id
, &mask
, NVKM_ACR_LSF_NUM
) {
158 int ret
= acrflcn
->func
->bootstrap_falcon(acrflcn
->falcon
, id
);
167 nvkm_acr_managed_falcon(struct nvkm_device
*device
, enum nvkm_acr_lsf_id id
)
169 struct nvkm_acr
*acr
= device
->acr
;
170 struct nvkm_acr_lsf
*lsf
;
173 list_for_each_entry(lsf
, &acr
->lsf
, head
) {
183 nvkm_acr_fini(struct nvkm_subdev
*subdev
, bool suspend
)
185 nvkm_acr_unload(nvkm_acr(subdev
));
190 nvkm_acr_init(struct nvkm_subdev
*subdev
)
192 if (!nvkm_acr_falcon(subdev
->device
))
195 return nvkm_acr_load(nvkm_acr(subdev
));
199 nvkm_acr_cleanup(struct nvkm_acr
*acr
)
201 nvkm_acr_lsfw_del_all(acr
);
202 nvkm_acr_hsfw_del_all(acr
);
203 nvkm_firmware_put(acr
->wpr_fw
);
208 nvkm_acr_oneinit(struct nvkm_subdev
*subdev
)
210 struct nvkm_device
*device
= subdev
->device
;
211 struct nvkm_acr
*acr
= nvkm_acr(subdev
);
212 struct nvkm_acr_hsfw
*hsfw
;
213 struct nvkm_acr_lsfw
*lsfw
, *lsft
;
214 struct nvkm_acr_lsf
*lsf
;
218 if (list_empty(&acr
->hsfw
)) {
219 nvkm_debug(subdev
, "No HSFW(s)\n");
220 nvkm_acr_cleanup(acr
);
224 /* Determine layout/size of WPR image up-front, as we need to know
225 * it to allocate memory before we begin constructing it.
227 list_for_each_entry_safe(lsfw
, lsft
, &acr
->lsfw
, head
) {
228 /* Cull unknown falcons that are present in WPR image. */
231 nvkm_acr_lsfw_del(lsfw
);
235 wpr_size
= acr
->wpr_fw
->size
;
238 /* Ensure we've fetched falcon configuration. */
239 ret
= nvkm_falcon_get(lsfw
->falcon
, subdev
);
243 nvkm_falcon_put(lsfw
->falcon
, subdev
);
245 if (!(lsf
= kmalloc(sizeof(*lsf
), GFP_KERNEL
)))
247 lsf
->func
= lsfw
->func
;
248 lsf
->falcon
= lsfw
->falcon
;
250 list_add_tail(&lsf
->head
, &acr
->lsf
);
253 if (!acr
->wpr_fw
|| acr
->wpr_comp
)
254 wpr_size
= acr
->func
->wpr_layout(acr
);
256 /* Allocate/Locate WPR + fill ucode blob pointer.
258 * dGPU: allocate WPR + shadow blob
259 * Tegra: locate WPR with regs, ensure size is sufficient,
260 * allocate ucode blob.
262 ret
= acr
->func
->wpr_alloc(acr
, wpr_size
);
266 nvkm_debug(subdev
, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
267 acr
->wpr_start
, acr
->wpr_end
, acr
->shadow_start
);
269 /* Write WPR to ucode blob. */
271 if (acr
->wpr_fw
&& !acr
->wpr_comp
)
272 nvkm_wobj(acr
->wpr
, 0, acr
->wpr_fw
->data
, acr
->wpr_fw
->size
);
274 if (!acr
->wpr_fw
|| acr
->wpr_comp
)
275 acr
->func
->wpr_build(acr
, nvkm_acr_falcon(device
));
276 acr
->func
->wpr_patch(acr
, (s64
)acr
->wpr_start
- acr
->wpr_prev
);
278 if (acr
->wpr_fw
&& acr
->wpr_comp
) {
280 for (i
= 0; i
< acr
->wpr_fw
->size
; i
+= 4) {
281 u32 us
= nvkm_ro32(acr
->wpr
, i
);
282 u32 fw
= ((u32
*)acr
->wpr_fw
->data
)[i
/4];
284 nvkm_warn(subdev
, "%08x: %08x %08x\n",
292 /* Allocate instance block for ACR-related stuff. */
293 ret
= nvkm_memory_new(device
, NVKM_MEM_TARGET_INST
, 0x1000, 0, true,
298 ret
= nvkm_vmm_new(device
, 0, 0, NULL
, 0, NULL
, "acr", &acr
->vmm
);
302 acr
->vmm
->debug
= acr
->subdev
.debug
;
304 ret
= nvkm_vmm_join(acr
->vmm
, acr
->inst
);
308 /* Load HS firmware blobs into ACR VMM. */
309 list_for_each_entry(hsfw
, &acr
->hsfw
, head
) {
310 nvkm_debug(subdev
, "loading %s fw\n", hsfw
->name
);
311 ret
= hsfw
->func
->load(acr
, hsfw
);
316 /* Kill temporary data. */
317 nvkm_acr_cleanup(acr
);
322 nvkm_acr_dtor(struct nvkm_subdev
*subdev
)
324 struct nvkm_acr
*acr
= nvkm_acr(subdev
);
325 struct nvkm_acr_hsf
*hsf
, *hst
;
326 struct nvkm_acr_lsf
*lsf
, *lst
;
328 list_for_each_entry_safe(hsf
, hst
, &acr
->hsf
, head
) {
329 nvkm_vmm_put(acr
->vmm
, &hsf
->vma
);
330 nvkm_memory_unref(&hsf
->ucode
);
332 list_del(&hsf
->head
);
336 nvkm_vmm_part(acr
->vmm
, acr
->inst
);
337 nvkm_vmm_unref(&acr
->vmm
);
338 nvkm_memory_unref(&acr
->inst
);
340 nvkm_memory_unref(&acr
->wpr
);
342 list_for_each_entry_safe(lsf
, lst
, &acr
->lsf
, head
) {
343 list_del(&lsf
->head
);
347 nvkm_acr_cleanup(acr
);
351 static const struct nvkm_subdev_func
353 .dtor
= nvkm_acr_dtor
,
354 .oneinit
= nvkm_acr_oneinit
,
355 .init
= nvkm_acr_init
,
356 .fini
= nvkm_acr_fini
,
360 nvkm_acr_ctor_wpr(struct nvkm_acr
*acr
, int ver
)
362 struct nvkm_subdev
*subdev
= &acr
->subdev
;
363 struct nvkm_device
*device
= subdev
->device
;
366 ret
= nvkm_firmware_get(subdev
, "acr/wpr", ver
, &acr
->wpr_fw
);
370 /* Pre-add LSFs in the order they appear in the FW WPR image so that
371 * we're able to do a binary comparison with our own generator.
373 ret
= acr
->func
->wpr_parse(acr
);
377 acr
->wpr_comp
= nvkm_boolopt(device
->cfgopt
, "NvAcrWprCompare", false);
378 acr
->wpr_prev
= nvkm_longopt(device
->cfgopt
, "NvAcrWprPrevAddr", 0);
383 nvkm_acr_new_(const struct nvkm_acr_fwif
*fwif
, struct nvkm_device
*device
,
384 int index
, struct nvkm_acr
**pacr
)
386 struct nvkm_acr
*acr
;
389 if (!(acr
= *pacr
= kzalloc(sizeof(*acr
), GFP_KERNEL
)))
391 nvkm_subdev_ctor(&nvkm_acr
, device
, index
, &acr
->subdev
);
392 INIT_LIST_HEAD(&acr
->hsfw
);
393 INIT_LIST_HEAD(&acr
->lsfw
);
394 INIT_LIST_HEAD(&acr
->hsf
);
395 INIT_LIST_HEAD(&acr
->lsf
);
397 fwif
= nvkm_firmware_load(&acr
->subdev
, fwif
, "Acr", acr
);
399 return PTR_ERR(fwif
);
401 acr
->func
= fwif
->func
;
403 wprfw
= nvkm_longopt(device
->cfgopt
, "NvAcrWpr", -1);
405 int ret
= nvkm_acr_ctor_wpr(acr
, wprfw
);