/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "msgqueue.h"
#include <engine/falcon.h>
#include <subdev/secboot.h>
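
/*
 * Message queue implementation for PMU firmware images that report
 * version 0137c63d (and the 0137bca5 variant further below).  The hex
 * suffixes match the version numbers advertised by the firmware; the
 * generic msgqueue code is expected to select one of these
 * implementations based on that version.
 */
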
/* Queues identifiers */
enum {
	/* High Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
	/* Low Priority Command Queue for Host -> PMU communication */
	MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
	/* Message queue for PMU -> Host communication */
	MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
	MSGQUEUE_0137C63D_NUM_QUEUES = 5,
};

struct msgqueue_0137c63d {
	struct nvkm_msgqueue base;

	struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
};
#define msgqueue_0137c63d(q) \
	container_of(q, struct msgqueue_0137c63d, base)

struct msgqueue_0137bca5 {
	struct msgqueue_0137c63d base;

	u64 wpr_addr;
};
#define msgqueue_0137bca5(q) \
	container_of(container_of(q, struct msgqueue_0137c63d, base), \
		     struct msgqueue_0137bca5, base)
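
/*
 * The 0137bca5 variant embeds the 0137c63d state, so converting from the
 * generic nvkm_msgqueue pointer takes two container_of() steps: first to
 * the embedded msgqueue_0137c63d, then to the enclosing msgqueue_0137bca5.
 */
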
static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
			    enum msgqueue_msg_priority priority)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
	const struct nvkm_subdev *subdev = priv->base.falcon->owner;

	switch (priority) {
	case MSGQUEUE_MSG_PRIORITY_HIGH:
		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
	case MSGQUEUE_MSG_PRIORITY_LOW:
		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
	default:
		nvkm_error(subdev, "invalid command queue!\n");
		return ERR_PTR(-EINVAL);
	}
}

static void
msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
	struct nvkm_msgqueue_queue *q_queue =
		&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];

	nvkm_msgqueue_process_msgs(&priv->base, q_queue);
}
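
/*
 * The two hooks above form the transmit and receive paths: cmd_queue()
 * returns the host -> PMU command queue matching the requested priority,
 * while process_msgs() drains the single PMU -> host message queue.
 */
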
/* Init unit */
#define MSGQUEUE_0137C63D_UNIT_INIT 0x07

enum {
	INIT_MSG_INIT = 0x0,
};

static void
init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	struct {
		u32 reserved;
		u32 freq_hz;
		u32 trace_size;
		u32 trace_dma_base;
		u16 trace_dma_base1;
		u8 trace_dma_offset;
		u32 trace_dma_idx;
		bool secure_mode;
		bool raise_priv_sec;
		struct {
			u32 dma_base;
			u16 dma_base1;
			u8 dma_offset;
			u16 fb_size;
			u8 dma_idx;
		} gc6_ctx;
		u8 pad;
	} *args = buf;

	args->secure_mode = 1;
}
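
/*
 * The anonymous struct above mirrors the command-line layout handed to
 * the PMU firmware at boot.  Only secure_mode is programmed here; the
 * trace-buffer and GC6 context fields are left untouched.
 */
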
/* forward declaration */
static int acr_init_wpr(struct nvkm_msgqueue *queue);

static int
init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
{
	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
	struct {
		struct nvkm_msgqueue_msg base;

		u8 pad;
		u16 os_debug_entry_point;

		struct {
			u16 size;
			u16 offset;
			u8 index;
			u8 pad;
		} queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];

		u16 sw_managed_area_offset;
		u16 sw_managed_area_size;
	} *init = (void *)hdr;
	const struct nvkm_subdev *subdev = _queue->falcon->owner;
	int i;

	if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
		nvkm_error(subdev, "expected message from init unit\n");
		return -EINVAL;
	}

	if (init->base.msg_type != INIT_MSG_INIT) {
		nvkm_error(subdev, "expected PMU init msg\n");
		return -EINVAL;
	}

	for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
		struct nvkm_msgqueue_queue *queue = &priv->queue[i];

		mutex_init(&queue->mutex);

		queue->index = init->queue_info[i].index;
		queue->offset = init->queue_info[i].offset;
		queue->size = init->queue_info[i].size;

		/*
		 * Command queues use per-index head/tail registers; the
		 * message queue has a dedicated pair.
		 */
		if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
			queue->head_reg = 0x4a0 + (queue->index * 4);
			queue->tail_reg = 0x4b0 + (queue->index * 4);
		} else {
			queue->head_reg = 0x4c8;
			queue->tail_reg = 0x4cc;
		}

		nvkm_debug(subdev,
			   "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
			   i, queue->index, queue->offset, queue->size);
	}

	/* Complete initialization by initializing WPR region */
	return acr_init_wpr(&priv->base);
}
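
/*
 * Init handshake: the PMU firmware sends an INIT message once it is up.
 * The callback above records each queue's index, offset, size and
 * head/tail registers from that message, then finishes initialization by
 * asking the ACR unit to set up the WPR region.
 */
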
static const struct nvkm_msgqueue_init_func
msgqueue_0137c63d_init_func = {
	.gen_cmdline = init_gen_cmdline,
	.init_callback = init_callback,
};

/* ACR unit */
#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a

enum {
	ACR_CMD_INIT_WPR_REGION = 0x00,
	ACR_CMD_BOOTSTRAP_FALCON = 0x01,
	ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
static void
acr_init_wpr_callback(struct nvkm_msgqueue *queue,
		      struct nvkm_msgqueue_hdr *hdr)
{
	struct {
		struct nvkm_msgqueue_msg base;
		u32 error_code;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = queue->falcon->owner;

	if (msg->error_code) {
		nvkm_error(subdev, "ACR WPR init failure: %d\n",
			   msg->error_code);
		return;
	}

	nvkm_debug(subdev, "ACR WPR init complete\n");
	complete_all(&queue->init_done);
}
static int
acr_init_wpr(struct nvkm_msgqueue *queue)
{
	/*
	 * region_id:	region ID in WPR region
	 * wpr_offset:	offset in WPR region
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 region_id;
		u32 wpr_offset;
	} cmd;
	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
	cmd.region_id = 0x01;
	cmd.wpr_offset = 0x00;

	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_init_wpr_callback, NULL, false);

	return 0;
}
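
/*
 * Note that the WPR init command is posted without waiting (last argument
 * false): acr_init_wpr_callback() signals queue->init_done instead, which
 * later ACR traffic can presumably be gated on.
 */
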
static void
acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
			 struct nvkm_msgqueue_hdr *hdr)
{
	struct acr_bootstrap_falcon_msg {
		struct nvkm_msgqueue_msg base;

		u32 falcon_id;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	u32 falcon_id = msg->falcon_id;

	if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
		nvkm_error(subdev, "in bootstrap falcon callback:\n");
		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
		return;
	}

	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
}
enum {
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
};

static int
acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
{
	DECLARE_COMPLETION_ONSTACK(completed);
	/*
	 * flags      - Flag specifying RESET or no RESET.
	 * falcon id  - Falcon id specifying falcon to bootstrap.
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 flags;
		u32 falcon_id;
	} cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
	cmd.falcon_id = falcon;
	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_boot_falcon_callback, &completed, true);

	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}
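
/*
 * Bootstrapping a single falcon is synchronous: the command is posted
 * together with an on-stack completion, and the caller waits up to one
 * second for the PMU's reply to be processed before giving up with
 * -ETIMEDOUT.
 */
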
static void
acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
				   struct nvkm_msgqueue_hdr *hdr)
{
	struct acr_bootstrap_falcon_msg {
		struct nvkm_msgqueue_msg base;

		u32 falcon_mask;
	} *msg = (void *)hdr;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	unsigned long falcon_mask = msg->falcon_mask;
	u32 falcon_id, falcon_treated = 0;

	for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		nvkm_debug(subdev, "%s booted\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		falcon_treated |= BIT(falcon_id);
	}

	if (falcon_treated != msg->falcon_mask) {
		nvkm_error(subdev, "in bootstrap falcon callback:\n");
		nvkm_error(subdev, "invalid falcon mask 0x%x\n",
			   msg->falcon_mask);
		return;
	}
}
static int
acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
{
	DECLARE_COMPLETION_ONSTACK(completed);
	/*
	 * flags       - Flag specifying RESET or no RESET.
	 * falcon mask - Bitmask of the falcons to bootstrap.
	 */
	struct {
		struct nvkm_msgqueue_hdr hdr;
		u8 cmd_type;
		u32 flags;
		u32 falcon_mask;
		u32 use_va_mask;
		u32 wpr_lo;
		u32 wpr_hi;
	} cmd;
	struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);

	memset(&cmd, 0, sizeof(cmd));

	cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
	cmd.hdr.size = sizeof(cmd);
	cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
	cmd.falcon_mask = falcon_mask;
	cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
	cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
			   acr_boot_multiple_falcons_callback, &completed, true);

	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}
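
/*
 * Unlike the single-falcon command, this one also passes the WPR address,
 * split into wpr_lo/wpr_hi.  Only the 0137bca5 variant tracks that address
 * (see msgqueue_0137bca5 and the FIXME in msgqueue_0137bca5_new() below),
 * which is why the hook is only wired into msgqueue_0137bca5_acr_func.
 */
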
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
	.boot_falcon = acr_boot_falcon,
};

static const struct nvkm_msgqueue_acr_func
msgqueue_0137bca5_acr_func = {
	.boot_falcon = acr_boot_falcon,
	.boot_multiple_falcons = acr_boot_multiple_falcons,
};

static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
	kfree(msgqueue_0137c63d(queue));
}

static const struct nvkm_msgqueue_func
msgqueue_0137c63d_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137c63d_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};
int
msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
		      struct nvkm_msgqueue **queue)
{
	struct msgqueue_0137c63d *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	*queue = &ret->base;

	nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);

	return 0;
}
static const struct nvkm_msgqueue_func
msgqueue_0137bca5_func = {
	.init_func = &msgqueue_0137c63d_init_func,
	.acr_func = &msgqueue_0137bca5_acr_func,
	.cmd_queue = msgqueue_0137c63d_cmd_queue,
	.recv = msgqueue_0137c63d_process_msgs,
	.dtor = msgqueue_0137c63d_dtor,
};
int
msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
		      struct nvkm_msgqueue **queue)
{
	struct msgqueue_0137bca5 *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	*queue = &ret->base.base;

	/*
	 * FIXME this must be set to the address of a *GPU* mapping within the
	 * ACR address space!
	 */
	/* ret->wpr_addr = sb->wpr_addr; */

	nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);

	return 0;
}