2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <engine/falcon.h>
25 #include <subdev/secboot.h>
/* Queues identifiers */
enum {
	/* High Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
	/* Low Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
	/* Message queue for PMU -> Host communication */
	MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
	/* Total number of queue slots tracked per interface instance */
	MSGQUEUE_0137C63D_NUM_QUEUES = 5,
};
38 struct msgqueue_0137c63d
{
39 struct nvkm_msgqueue base
;
41 struct nvkm_msgqueue_queue queue
[MSGQUEUE_0137C63D_NUM_QUEUES
];
43 #define msgqueue_0137c63d(q) \
44 container_of(q, struct msgqueue_0137c63d, base)
46 struct msgqueue_0137bca5
{
47 struct msgqueue_0137c63d base
;
51 #define msgqueue_0137bca5(q) \
52 container_of(container_of(q, struct msgqueue_0137c63d, base), \
53 struct msgqueue_0137bca5, base);
55 static struct nvkm_msgqueue_queue
*
56 msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue
*queue
,
57 enum msgqueue_msg_priority priority
)
59 struct msgqueue_0137c63d
*priv
= msgqueue_0137c63d(queue
);
60 const struct nvkm_subdev
*subdev
= priv
->base
.falcon
->owner
;
63 case MSGQUEUE_MSG_PRIORITY_HIGH
:
64 return &priv
->queue
[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ
];
65 case MSGQUEUE_MSG_PRIORITY_LOW
:
66 return &priv
->queue
[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ
];
68 nvkm_error(subdev
, "invalid command queue!\n");
69 return ERR_PTR(-EINVAL
);
74 msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue
*queue
)
76 struct msgqueue_0137c63d
*priv
= msgqueue_0137c63d(queue
);
77 struct nvkm_msgqueue_queue
*q_queue
=
78 &priv
->queue
[MSGQUEUE_0137C63D_MESSAGE_QUEUE
];
80 nvkm_msgqueue_process_msgs(&priv
->base
, q_queue
);
84 #define MSGQUEUE_0137C63D_UNIT_INIT 0x07
91 init_gen_cmdline(struct nvkm_msgqueue
*queue
, void *buf
)
113 args
->secure_mode
= 1;
116 /* forward declaration */
117 static int acr_init_wpr(struct nvkm_msgqueue
*queue
);
120 init_callback(struct nvkm_msgqueue
*_queue
, struct nvkm_msgqueue_hdr
*hdr
)
122 struct msgqueue_0137c63d
*priv
= msgqueue_0137c63d(_queue
);
124 struct nvkm_msgqueue_msg base
;
127 u16 os_debug_entry_point
;
134 } queue_info
[MSGQUEUE_0137C63D_NUM_QUEUES
];
136 u16 sw_managed_area_offset
;
137 u16 sw_managed_area_size
;
138 } *init
= (void *)hdr
;
139 const struct nvkm_subdev
*subdev
= _queue
->falcon
->owner
;
142 if (init
->base
.hdr
.unit_id
!= MSGQUEUE_0137C63D_UNIT_INIT
) {
143 nvkm_error(subdev
, "expected message from init unit\n");
147 if (init
->base
.msg_type
!= INIT_MSG_INIT
) {
148 nvkm_error(subdev
, "expected PMU init msg\n");
152 for (i
= 0; i
< MSGQUEUE_0137C63D_NUM_QUEUES
; i
++) {
153 struct nvkm_msgqueue_queue
*queue
= &priv
->queue
[i
];
155 mutex_init(&queue
->mutex
);
157 queue
->index
= init
->queue_info
[i
].index
;
158 queue
->offset
= init
->queue_info
[i
].offset
;
159 queue
->size
= init
->queue_info
[i
].size
;
161 if (i
!= MSGQUEUE_0137C63D_MESSAGE_QUEUE
) {
162 queue
->head_reg
= 0x4a0 + (queue
->index
* 4);
163 queue
->tail_reg
= 0x4b0 + (queue
->index
* 4);
165 queue
->head_reg
= 0x4c8;
166 queue
->tail_reg
= 0x4cc;
170 "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
171 i
, queue
->index
, queue
->offset
, queue
->size
);
174 /* Complete initialization by initializing WPR region */
175 return acr_init_wpr(&priv
->base
);
178 static const struct nvkm_msgqueue_init_func
179 msgqueue_0137c63d_init_func
= {
180 .gen_cmdline
= init_gen_cmdline
,
181 .init_callback
= init_callback
,
/* ACR unit */
#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a

/* Command types understood by the ACR unit */
enum {
	ACR_CMD_INIT_WPR_REGION = 0x00,
	ACR_CMD_BOOTSTRAP_FALCON = 0x01,
	ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
196 acr_init_wpr_callback(struct nvkm_msgqueue
*queue
,
197 struct nvkm_msgqueue_hdr
*hdr
)
200 struct nvkm_msgqueue_msg base
;
202 } *msg
= (void *)hdr
;
203 const struct nvkm_subdev
*subdev
= queue
->falcon
->owner
;
205 if (msg
->error_code
) {
206 nvkm_error(subdev
, "ACR WPR init failure: %d\n",
211 nvkm_debug(subdev
, "ACR WPR init complete\n");
212 complete_all(&queue
->init_done
);
216 acr_init_wpr(struct nvkm_msgqueue
*queue
)
219 * region_id: region ID in WPR region
220 * wpr_offset: offset in WPR region
223 struct nvkm_msgqueue_hdr hdr
;
228 memset(&cmd
, 0, sizeof(cmd
));
230 cmd
.hdr
.unit_id
= MSGQUEUE_0137C63D_UNIT_ACR
;
231 cmd
.hdr
.size
= sizeof(cmd
);
232 cmd
.cmd_type
= ACR_CMD_INIT_WPR_REGION
;
233 cmd
.region_id
= 0x01;
234 cmd
.wpr_offset
= 0x00;
236 nvkm_msgqueue_post(queue
, MSGQUEUE_MSG_PRIORITY_HIGH
, &cmd
.hdr
,
237 acr_init_wpr_callback
, NULL
, false);
244 acr_boot_falcon_callback(struct nvkm_msgqueue
*priv
,
245 struct nvkm_msgqueue_hdr
*hdr
)
247 struct acr_bootstrap_falcon_msg
{
248 struct nvkm_msgqueue_msg base
;
251 } *msg
= (void *)hdr
;
252 const struct nvkm_subdev
*subdev
= priv
->falcon
->owner
;
253 u32 falcon_id
= msg
->falcon_id
;
255 if (falcon_id
>= NVKM_SECBOOT_FALCON_END
) {
256 nvkm_error(subdev
, "in bootstrap falcon callback:\n");
257 nvkm_error(subdev
, "invalid falcon ID 0x%x\n", falcon_id
);
260 nvkm_debug(subdev
, "%s booted\n", nvkm_secboot_falcon_name
[falcon_id
]);
/* Reset flags accepted by the ACR bootstrap commands */
enum {
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};
269 acr_boot_falcon(struct nvkm_msgqueue
*priv
, enum nvkm_secboot_falcon falcon
)
271 DECLARE_COMPLETION_ONSTACK(completed
);
273 * flags - Flag specifying RESET or no RESET.
274 * falcon id - Falcon id specifying falcon to bootstrap.
277 struct nvkm_msgqueue_hdr hdr
;
283 memset(&cmd
, 0, sizeof(cmd
));
285 cmd
.hdr
.unit_id
= MSGQUEUE_0137C63D_UNIT_ACR
;
286 cmd
.hdr
.size
= sizeof(cmd
);
287 cmd
.cmd_type
= ACR_CMD_BOOTSTRAP_FALCON
;
288 cmd
.flags
= ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES
;
289 cmd
.falcon_id
= falcon
;
290 nvkm_msgqueue_post(priv
, MSGQUEUE_MSG_PRIORITY_HIGH
, &cmd
.hdr
,
291 acr_boot_falcon_callback
, &completed
, true);
293 if (!wait_for_completion_timeout(&completed
, msecs_to_jiffies(1000)))
300 acr_boot_multiple_falcons_callback(struct nvkm_msgqueue
*priv
,
301 struct nvkm_msgqueue_hdr
*hdr
)
303 struct acr_bootstrap_falcon_msg
{
304 struct nvkm_msgqueue_msg base
;
307 } *msg
= (void *)hdr
;
308 const struct nvkm_subdev
*subdev
= priv
->falcon
->owner
;
309 unsigned long falcon_mask
= msg
->falcon_mask
;
310 u32 falcon_id
, falcon_treated
= 0;
312 for_each_set_bit(falcon_id
, &falcon_mask
, NVKM_SECBOOT_FALCON_END
) {
313 nvkm_debug(subdev
, "%s booted\n",
314 nvkm_secboot_falcon_name
[falcon_id
]);
315 falcon_treated
|= BIT(falcon_id
);
318 if (falcon_treated
!= msg
->falcon_mask
) {
319 nvkm_error(subdev
, "in bootstrap falcon callback:\n");
320 nvkm_error(subdev
, "invalid falcon mask 0x%x\n",
327 acr_boot_multiple_falcons(struct nvkm_msgqueue
*priv
, unsigned long falcon_mask
)
329 DECLARE_COMPLETION_ONSTACK(completed
);
331 * flags - Flag specifying RESET or no RESET.
332 * falcon id - Falcon id specifying falcon to bootstrap.
335 struct nvkm_msgqueue_hdr hdr
;
343 struct msgqueue_0137bca5
*queue
= msgqueue_0137bca5(priv
);
345 memset(&cmd
, 0, sizeof(cmd
));
347 cmd
.hdr
.unit_id
= MSGQUEUE_0137C63D_UNIT_ACR
;
348 cmd
.hdr
.size
= sizeof(cmd
);
349 cmd
.cmd_type
= ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS
;
350 cmd
.flags
= ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES
;
351 cmd
.falcon_mask
= falcon_mask
;
352 cmd
.wpr_lo
= lower_32_bits(queue
->wpr_addr
);
353 cmd
.wpr_hi
= upper_32_bits(queue
->wpr_addr
);
354 nvkm_msgqueue_post(priv
, MSGQUEUE_MSG_PRIORITY_HIGH
, &cmd
.hdr
,
355 acr_boot_multiple_falcons_callback
, &completed
, true);
357 if (!wait_for_completion_timeout(&completed
, msecs_to_jiffies(1000)))
363 static const struct nvkm_msgqueue_acr_func
364 msgqueue_0137c63d_acr_func
= {
365 .boot_falcon
= acr_boot_falcon
,
368 static const struct nvkm_msgqueue_acr_func
369 msgqueue_0137bca5_acr_func
= {
370 .boot_falcon
= acr_boot_falcon
,
371 .boot_multiple_falcons
= acr_boot_multiple_falcons
,
/* Free the interface state allocated by the *_new() constructors. */
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
	kfree(msgqueue_0137c63d(queue));
}
380 static const struct nvkm_msgqueue_func
381 msgqueue_0137c63d_func
= {
382 .init_func
= &msgqueue_0137c63d_init_func
,
383 .acr_func
= &msgqueue_0137c63d_acr_func
,
384 .cmd_queue
= msgqueue_0137c63d_cmd_queue
,
385 .recv
= msgqueue_0137c63d_process_msgs
,
386 .dtor
= msgqueue_0137c63d_dtor
,
390 msgqueue_0137c63d_new(struct nvkm_falcon
*falcon
, const struct nvkm_secboot
*sb
,
391 struct nvkm_msgqueue
**queue
)
393 struct msgqueue_0137c63d
*ret
;
395 ret
= kzalloc(sizeof(*ret
), GFP_KERNEL
);
401 nvkm_msgqueue_ctor(&msgqueue_0137c63d_func
, falcon
, &ret
->base
);
406 static const struct nvkm_msgqueue_func
407 msgqueue_0137bca5_func
= {
408 .init_func
= &msgqueue_0137c63d_init_func
,
409 .acr_func
= &msgqueue_0137bca5_acr_func
,
410 .cmd_queue
= msgqueue_0137c63d_cmd_queue
,
411 .recv
= msgqueue_0137c63d_process_msgs
,
412 .dtor
= msgqueue_0137c63d_dtor
,
416 msgqueue_0137bca5_new(struct nvkm_falcon
*falcon
, const struct nvkm_secboot
*sb
,
417 struct nvkm_msgqueue
**queue
)
419 struct msgqueue_0137bca5
*ret
;
421 ret
= kzalloc(sizeof(*ret
), GFP_KERNEL
);
425 *queue
= &ret
->base
.base
;
428 * FIXME this must be set to the address of a *GPU* mapping within the
431 /* ret->wpr_addr = sb->wpr_addr; */
433 nvkm_msgqueue_ctor(&msgqueue_0137bca5_func
, falcon
, &ret
->base
.base
);