/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "msgqueue.h"
#include <engine/falcon.h>

#include <subdev/secboot.h>

#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128

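/*
 * A msgqueue is a ring buffer located in the falcon's DMEM. The current
 * head and tail pointers are kept in falcon registers (queue->head_reg and
 * queue->tail_reg), while the payload itself is transferred through DMEM
 * between queue->offset and queue->offset + queue->size. For message
 * queues the host consumes data and owns the tail pointer; for command
 * queues the host produces data and owns the head pointer.
 */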
static int
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;

	mutex_lock(&queue->mutex);

	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return 0;
}

static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);

	mutex_unlock(&queue->mutex);
}

static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return head == tail;
}

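/*
 * Pop up to @size bytes from the message queue into @data. The cached read
 * position (queue->position) is compared against the head register to
 * detect that the firmware writer has wrapped around, in which case
 * reading restarts from queue->offset. Returns the number of bytes read,
 * or 0 if the queue turned out to be empty.
 */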
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	      void *data, u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	/* has the buffer looped? */
	if (head < queue->position)
		queue->position = queue->offset;

	tail = queue->position;

	available = head - tail;

	if (available == 0) {
		nvkm_warn(subdev, "no message data available\n");
		return 0;
	}

	if (size > available) {
		nvkm_warn(subdev, "message data smaller than read request\n");
		size = available;
	}

	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return size;
}

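/*
 * Read one complete message: the header first, then, if hdr->size
 * indicates a body, the remaining bytes right after the header. The tail
 * pointer is only committed back to the falcon when no error occurred, so
 * a failed read leaves the queue state untouched.
 */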
static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	int err;

	err = msg_queue_open(priv, queue);
	if (err) {
		nvkm_error(subdev, "failed to open queue %d\n", queue->index);
		return err;
	}

	if (msg_queue_empty(priv, queue)) {
		err = 0;
		goto close;
	}

	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
	if (err >= 0 && err != HDR_SIZE)
		err = -EINVAL;
	if (err < 0) {
		nvkm_error(subdev, "failed to read message header: %d\n", err);
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		err = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
		if (err >= 0 && err != read_size)
			err = -EINVAL;
		if (err < 0) {
			nvkm_error(subdev, "failed to read message: %d\n", err);
			goto close;
		}
	}

close:
	msg_queue_close(priv, queue, (err >= 0));

	return err;
}

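/*
 * Check whether the command queue has room for a command of @size bytes.
 * HDR_SIZE bytes are kept in reserve at the end of the buffer so that a
 * REWIND command can always be written there; if the tail end is too small
 * for the command, *rewind is set to tell the caller to wrap. One byte is
 * always left between head and tail so a full queue is never mistaken for
 * an empty one.
 */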
static bool
cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		   u32 size, bool *rewind)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail, free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	if (head >= tail) {
		free = queue->offset + queue->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			*rewind = true;
			head = queue->offset;
		}
	}

	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}

static int
cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       void *data, u32 size)
{
	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return 0;
}

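/*
 * When a command does not fit into the space left before the end of the
 * buffer, a REWIND command is written there instead. It tells the falcon
 * that the command stream continues at the start of the queue.
 */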
/* REWIND unit is always 0x00 */
#define MSGQUEUE_UNIT_REWIND 0x00

static void
cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_hdr cmd;
	int err;

	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
	cmd.size = sizeof(cmd);
	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
	if (err)
		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
	else
		nvkm_debug(subdev, "queue %d rewound\n", queue->index);

	queue->position = queue->offset;
}

static int
cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	bool rewind = false;

	mutex_lock(&queue->mutex);

	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
		nvkm_error(subdev, "queue full\n");
		mutex_unlock(&queue->mutex);
		return -EAGAIN;
	}

	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);

	if (rewind)
		cmd_queue_rewind(priv, queue);

	return 0;
}

static void
cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);

	mutex_unlock(&queue->mutex);
}

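/*
 * Write a command into the queue, retrying for up to 2 seconds if the
 * queue is currently full. The head pointer is only committed to the
 * falcon if the push succeeded.
 */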
static int
cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
	  struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	static unsigned timeout = 2000;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
	int ret = -EAGAIN;
	bool commit = true;

	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
		ret = cmd_queue_open(priv, queue, cmd->size);
	if (ret) {
		nvkm_error(subdev, "cmd_queue_open failed\n");
		return ret;
	}

	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
	if (ret) {
		nvkm_error(subdev, "cmd_queue_push failed\n");
		commit = false;
	}

	cmd_queue_close(priv, queue, commit);

	return ret;
}

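/*
 * Every in-flight command is matched to its answer message through a
 * sequence ID. priv->seq_tbl is a bitmap of the IDs currently in use;
 * acquiring a sequence picks the first free bit and returns the
 * corresponding nvkm_msgqueue_seq, which stores the callback and
 * completion to run once the answer arrives.
 */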
static struct nvkm_msgqueue_seq *
msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;
	u32 index;

	mutex_lock(&priv->seq_lock);

	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);

	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
		nvkm_error(subdev, "no free sequence available\n");
		mutex_unlock(&priv->seq_lock);
		return ERR_PTR(-EAGAIN);
	}

	set_bit(index, priv->seq_tbl);

	mutex_unlock(&priv->seq_lock);

	seq = &priv->seq[index];
	seq->state = SEQ_STATE_PENDING;

	return seq;
}

static void
msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
{
	/* no need to acquire seq_lock since clear_bit is atomic */
	seq->state = SEQ_STATE_FREE;
	seq->callback = NULL;
	seq->completion = NULL;
	clear_bit(seq->id, priv->seq_tbl);
}

/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)

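/*
 * Post a command to the falcon and register how its answer is to be
 * handled: @cb runs when the answer message arrives and @completion, if
 * non-NULL, is completed afterwards. With @wait_init set, the call blocks
 * for up to one second until the firmware's INIT message has been
 * processed.
 *
 * Hypothetical caller sketch (the priority enum and callback typedef are
 * assumed to come from msgqueue.h, and "cmd" stands for a unit-specific
 * command structure starting with a nvkm_msgqueue_hdr):
 *
 *	static void my_callback(struct nvkm_msgqueue *queue,
 *				struct nvkm_msgqueue_hdr *hdr)
 *	{
 *		... inspect the answer message ...
 *	}
 *
 *	ret = nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
 *				 my_callback, &completion, true);
 */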
int
nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
		   struct completion *completion, bool wait_init)
{
	struct nvkm_msgqueue_seq *seq;
	struct nvkm_msgqueue_queue *queue;
	int ret;

	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
						      msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	queue = priv->func->cmd_queue(priv, prio);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	seq = msgqueue_seq_acquire(priv);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->callback = cb;
	seq->state = SEQ_STATE_USED;
	seq->completion = completion;

	ret = cmd_write(priv, cmd, queue);
	if (ret) {
		seq->state = SEQ_STATE_PENDING;
		msgqueue_seq_release(priv, seq);
	}

	return ret;
}

static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;

	seq = &priv->seq[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->callback(priv, hdr);
	}

	if (seq->completion)
		complete(seq->completion);

	msgqueue_seq_release(priv, seq);

	return 0;
}

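/*
 * The INIT message is special: it arrives before the queues have been set
 * up, so its tail register has to be hard-coded per falcon and the message
 * is read straight out of DMEM instead of through msg_queue_read(). The
 * firmware-specific init_callback then parses the payload.
 */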
static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
			 struct nvkm_msgqueue_hdr *hdr)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 tail;
	u32 tail_reg;
	int ret;

	/*
	 * Of course the message queue registers vary depending on the falcon
	 * used...
	 */
	switch (falcon->owner->index) {
	case NVKM_SUBDEV_PMU:
		tail_reg = 0x4cc;
		break;
	case NVKM_ENGINE_SEC2:
		tail_reg = 0xa34;
		break;
	default:
		nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
			   nvkm_subdev_name[falcon->owner->index]);
		return -EINVAL;
	}

	/*
	 * Read the message - queues are not initialized yet so we cannot rely
	 * on msg_queue_read()
	 */
	tail = nvkm_falcon_rd32(falcon, tail_reg);
	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		return -ENOSPC;
	}

	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
			      (hdr + 1));

	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
	nvkm_falcon_wr32(falcon, tail_reg, tail);

	ret = priv->func->init_func->init_callback(priv, hdr);
	if (ret)
		return ret;

	return 0;
}

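/*
 * Process all pending messages. The very first message received must be
 * the INIT message; once it has been handled, subsequent messages are read
 * and dispatched to their sequence handlers until the queue is empty.
 */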
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
			   struct nvkm_msgqueue_queue *queue)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
	int ret;

	/* the first message we receive must be the init message */
	if (!priv->init_msg_received) {
		ret = msgqueue_handle_init_msg(priv, hdr);
		if (!ret)
			priv->init_msg_received = true;
	} else {
		while (msg_queue_read(priv, queue, hdr) > 0)
			msgqueue_msg_handle(priv, hdr);
	}
}

void
nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	if (!queue || !queue->func || !queue->func->init_func)
		return;

	queue->func->init_func->gen_cmdline(queue, buf);
}

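/*
 * Request the ACR unit of the firmware to boot the falcons set in
 * @falcon_mask, either through a single multiple-falcon command when the
 * firmware supports it, or by booting each falcon individually.
 *
 * Hypothetical call, assuming the falcon IDs declared in
 * <subdev/secboot.h>:
 *
 *	ret = nvkm_msgqueue_acr_boot_falcons(queue,
 *					     BIT(NVKM_SECBOOT_FALCON_FECS) |
 *					     BIT(NVKM_SECBOOT_FALCON_GPCCS));
 */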
int
nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
			       unsigned long falcon_mask)
{
	unsigned long falcon;

	if (!queue || !queue->func->acr_func)
		return -ENODEV;

	/* Does the firmware support booting multiple falcons? */
	if (queue->func->acr_func->boot_multiple_falcons)
		return queue->func->acr_func->boot_multiple_falcons(queue,
								    falcon_mask);

	/* Else boot all requested falcons individually */
	if (!queue->func->acr_func->boot_falcon)
		return -ENODEV;

	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		int ret = queue->func->acr_func->boot_falcon(queue, falcon);

		if (ret)
			return ret;
	}

	return 0;
}

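/*
 * Instantiate the firmware-specific msgqueue implementation matching the
 * version number advertised by the loaded firmware.
 */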
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
		  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
	const struct nvkm_subdev *subdev = falcon->owner;
	int ret = -EINVAL;

	switch (version) {
	case 0x0137c63d:
		ret = msgqueue_0137c63d_new(falcon, sb, queue);
		break;
	case 0x0137bca5:
		ret = msgqueue_0137bca5_new(falcon, sb, queue);
		break;
	case 0x0148cdec:
	case 0x015ccf3e:
		ret = msgqueue_0148cdec_new(falcon, sb, queue);
		break;
	default:
		nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
			   version);
		break;
	}

	if (ret == 0) {
		nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
		(*queue)->fw_version = version;
	}

	return ret;
}

void
nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
{
	if (*queue) {
		(*queue)->func->dtor(*queue);
		*queue = NULL;
	}
}

void
nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
{
	if (!queue->func || !queue->func->recv) {
		const struct nvkm_subdev *subdev = queue->falcon->owner;

		nvkm_warn(subdev, "missing msgqueue recv function\n");
		return;
	}

	queue->func->recv(queue);
}

int
nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
{
	/* firmware not set yet... */
	if (!queue)
		return 0;

	queue->init_msg_received = false;
	reinit_completion(&queue->init_done);

	return 0;
}

void
nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
		   struct nvkm_falcon *falcon,
		   struct nvkm_msgqueue *queue)
{
	int i;

	queue->func = func;
	queue->falcon = falcon;
	mutex_init(&queue->seq_lock);
	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
		queue->seq[i].id = i;

	init_completion(&queue->init_done);
}