/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>

#include <nvfw/flcn.h>
#include <nvfw/pmu.h>

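/*
 * Callback for the ACR BOOTSTRAP_FALCON command: the PMU replies with the
 * ID of the falcon it bootstrapped, which nvkm_falcon_cmdq_send() then
 * returns to the caller below.
 */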
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
{
	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	return msg->falcon_id;
}

int
gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			       enum nvkm_acr_lsf_id id)
{
	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};
	int ret;

	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				    gm20b_pmu_acr_bootstrap_falcon_cb,
				    &pmu->subdev, msecs_to_jiffies(1000));
	if (ret >= 0) {
		/* A successful ack echoes the bootstrapped falcon's ID. */
		if (ret != cmd.falcon_id)
			ret = -EIO;
		else
			ret = 0;
	}

	return ret;
}

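/*
 * Start the ACR HS ucode on the PMU falcon.  The boot arguments are
 * written to the very top of DMEM -- the same location the .argv field
 * of the loader config (below) points at.
 */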
int
gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_pmu_args args = { .secure_mode = true };
	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}

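/*
 * The loader config packs each 256-byte-aligned DMA address as a pair:
 * bits 39:8 in *_dma_base, the remaining high bits in *_dma_base1.
 * Rebase the code/data/overlay addresses by 'adjust' bytes and write the
 * result back into the WPR image.
 */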
void
gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config hdr;
	u64 addr;

	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
	hdr.code_dma_base  = lower_32_bits((addr + adjust) >> 8);
	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
	hdr.data_dma_base  = lower_32_bits((addr + adjust) >> 8);
	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
	hdr.overlay_dma_base  = lower_32_bits((addr + adjust) << 8);
	hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8);
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));

	loader_config_dump(&acr->subdev, &hdr);
}

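/*
 * Fill in the initial loader config for the PMU LS firmware inside the
 * WPR image.  Addresses are programmed in 256-byte units (hence '>> 8');
 * gm20b_pmu_acr_bld_patch() above rebases them for the image's final
 * location.
 */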
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
	const struct loader_config hdr = {
		.dma_idx = FALCON_DMAIDX_UCODE,
		.code_dma_base = lower_32_bits(code),
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lower_32_bits(data),
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lower_32_bits(code),
		.argc = 1,
		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
		.code_dma_base1 = upper_32_bits(code),
		.data_dma_base1 = upper_32_bits(data),
		.overlay_dma_base1 = upper_32_bits(code),
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
	.bld_size = sizeof(struct loader_config),
	.bld_write = gm20b_pmu_acr_bld_write,
	.bld_patch = gm20b_pmu_acr_bld_patch,
	.boot = gm20b_pmu_acr_boot,
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};

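/*
 * INIT_WPR_REGION handshake: point the PMU at the write-protected region
 * (region 1, offset 0) and complete pmu->wpr_ready once it acknowledges.
 */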
static int
gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
{
	struct nv_pmu_acr_init_wpr_region_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_pmu *pmu = priv;
	struct nvkm_subdev *subdev = &pmu->subdev;

	if (msg->error_code) {
		nvkm_error(subdev, "ACR WPR init failure: %d\n",
			   msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "ACR WPR init complete\n");
	complete_all(&pmu->wpr_ready);
	return 0;
}

static int
gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
{
	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
		.region_id = 1,
		.wpr_offset = 0,
	};

	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
}

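/*
 * The first message the PMU firmware sends describes its queues:
 * queue_info[0] and [1] are the high- and low-priority command queues,
 * queue_info[4] is the message queue.
 */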
int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
	struct nv_pmu_init_msg msg;
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
		return -EINVAL;

	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
			      msg.queue_info[0].offset,
			      msg.queue_info[0].size);
	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
			      msg.queue_info[1].offset,
			      msg.queue_info[1].size);
	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
			      msg.queue_info[4].offset,
			      msg.queue_info[4].size);
	return gm20b_pmu_acr_init_wpr(pmu);
}

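/*
 * Message-queue IRQ handler.  The init message must be parsed before the
 * queues are usable, so it is handled specially on first receipt.
 */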
void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
	if (!pmu->initmsg_received) {
		int ret = pmu->func->initmsg(pmu);
		if (ret) {
			nvkm_error(&pmu->subdev,
				   "error parsing init message: %d\n", ret);
			return;
		}

		pmu->initmsg_received = true;
	}

	nvkm_falcon_msgq_recv(pmu->msgq);
}

static const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gt215_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif

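/*
 * Load the signed LS firmware triple (desc/image/sig) listed above so
 * ACR can copy it into the WPR region.
 */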
static int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
						 NVKM_ACR_LSF_PMU, "pmu/",
						 ver, fwif->acr);
}

static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{}
};

int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}