/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "msgqueue.h"
#include <engine/falcon.h>

#include <subdev/secboot.h>
#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
#define QUEUE_ALIGNMENT 4
/* max size of the messages we can receive */
#define MSG_BUF_SIZE 128
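/*
 * Message queue (falcon -> host) helpers: a queue is a region of falcon DMEM
 * bounded by head/tail registers. Reading is done under the queue mutex:
 * open latches the current tail, pop copies data out of DMEM and advances the
 * cached position, and close writes the new tail back when the read succeeded.
 */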
static int
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;

	mutex_lock(&queue->mutex);

	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return 0;
}
static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);

	mutex_unlock(&queue->mutex);
}
static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return head == tail;
}
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	      void *data, u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	/* has the buffer looped? */
	if (head < queue->position)
		queue->position = queue->offset;

	tail = queue->position;

	available = head - tail;

	if (available == 0) {
		nvkm_warn(subdev, "no message data available\n");
		return 0;
	}

	if (size > available) {
		nvkm_warn(subdev, "message data smaller than read request\n");
		size = available;
	}

	nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return size;
}
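/*
 * Read one complete message: the fixed-size header first, then, if the header
 * advertises a larger size, the remaining payload right after the header in
 * the caller's buffer.
 */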
static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	int err;

	err = msg_queue_open(priv, queue);
	if (err) {
		nvkm_error(subdev, "fail to open queue %d\n", queue->index);
		return err;
	}

	if (msg_queue_empty(priv, queue)) {
		err = 0;
		goto close;
	}

	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
	if (err >= 0 && err != HDR_SIZE)
		err = -EINVAL;
	if (err < 0) {
		nvkm_error(subdev, "failed to read message header: %d\n", err);
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		err = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
		if (err >= 0 && err != read_size)
			err = -EINVAL;
		if (err < 0) {
			nvkm_error(subdev, "failed to read message: %d\n", err);
			goto close;
		}
	}

close:
	msg_queue_close(priv, queue, (err >= 0));

	return err;
}
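/*
 * Command queue (host -> falcon) helpers: the same head/tail scheme, except
 * that the host writes commands at the head and commits them by updating the
 * head register, while the falcon consumes from the tail.
 */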
static bool
cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		   u32 size, bool *rewind)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head, tail, free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	if (head >= tail) {
		free = queue->offset + queue->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			*rewind = true;
			head = queue->offset;
		}
	}

	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}
static int
cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       void *data, u32 size)
{
	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

	return 0;
}
/* REWIND unit is always 0x00 */
#define MSGQUEUE_UNIT_REWIND 0x00
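/*
 * When a command would not fit in the space left before the end of the queue,
 * cmd_queue_has_room() asks for a rewind: a REWIND command is pushed so the
 * falcon knows to wrap around, and our write position is reset to the start
 * of the queue.
 */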
static void
cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_hdr cmd;
	int err;

	cmd.unit_id = MSGQUEUE_UNIT_REWIND;
	cmd.size = sizeof(cmd);
	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
	if (err)
		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
	else
		nvkm_error(subdev, "queue %d rewound\n", queue->index);

	queue->position = queue->offset;
}
static int
cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	bool rewind = false;

	mutex_lock(&queue->mutex);

	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
		nvkm_error(subdev, "queue full\n");
		mutex_unlock(&queue->mutex);
		return -EAGAIN;
	}

	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);

	if (rewind)
		cmd_queue_rewind(priv, queue);

	return 0;
}
static void
cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);

	mutex_unlock(&queue->mutex);
}
static int
cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
	  struct nvkm_msgqueue_queue *queue)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	static unsigned timeout = 2000;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
	int ret = -EAGAIN;
	bool commit = true;

	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
		ret = cmd_queue_open(priv, queue, cmd->size);
	if (ret) {
		nvkm_error(subdev, "pmu_queue_open_write failed\n");
		return ret;
	}

	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
	if (ret) {
		nvkm_error(subdev, "pmu_queue_push failed\n");
		commit = false;
	}

	cmd_queue_close(priv, queue, commit);

	return ret;
}
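/*
 * Sequence tracking: every command in flight owns a slot in priv->seq,
 * allocated from the seq_tbl bitmap. The firmware echoes the sequence id in
 * its answer message, which lets us find the matching callback and completion
 * again.
 */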
static struct nvkm_msgqueue_seq *
msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;
	u32 index;

	mutex_lock(&priv->seq_lock);

	index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);

	if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
		nvkm_error(subdev, "no free sequence available\n");
		mutex_unlock(&priv->seq_lock);
		return ERR_PTR(-EAGAIN);
	}

	set_bit(index, priv->seq_tbl);

	mutex_unlock(&priv->seq_lock);

	seq = &priv->seq[index];
	seq->state = SEQ_STATE_PENDING;

	return seq;
}
static void
msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
{
	/* no need to acquire seq_lock since clear_bit is atomic */
	seq->state = SEQ_STATE_FREE;
	seq->callback = NULL;
	seq->completion = NULL;
	clear_bit(seq->id, priv->seq_tbl);
}
/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)
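/*
 * Post a command to the falcon: wait for the init message if requested, pick
 * the command queue for the given priority, acquire a sequence slot, tag the
 * command with it and write it out. The sequence is released again if the
 * write fails.
 */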
int
nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
		   struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
		   struct completion *completion, bool wait_init)
{
	struct nvkm_msgqueue_seq *seq;
	struct nvkm_msgqueue_queue *queue;
	int ret;

	if (wait_init && !wait_for_completion_timeout(&priv->init_done,
						      msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	queue = priv->func->cmd_queue(priv, prio);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	seq = msgqueue_seq_acquire(priv);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->callback = cb;
	seq->state = SEQ_STATE_USED;
	seq->completion = completion;

	ret = cmd_write(priv, cmd, queue);
	if (ret) {
		seq->state = SEQ_STATE_PENDING;
		msgqueue_seq_release(priv, seq);
	}

	return ret;
}
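/*
 * Handle an answer message: match it to its sequence using the seq_id carried
 * in the header, invoke the callback, signal the completion and release the
 * sequence slot.
 */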
static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_msgqueue_seq *seq;

	seq = &priv->seq[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->callback(priv, hdr);
	}

	if (seq->completion)
		complete(seq->completion);

	msgqueue_seq_release(priv, seq);

	return 0;
}
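/*
 * The first message the falcon sends describes its queues. Since we do not
 * know the queue offsets yet, it must be read directly from DMEM using a
 * falcon-specific tail register, and is then handed to the firmware-specific
 * init callback.
 */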
static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
			 struct nvkm_msgqueue_hdr *hdr)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 tail_reg, tail;
	int ret;

	/*
	 * Of course the message queue registers vary depending on the falcon
	 * used.
	 */
	switch (falcon->owner->index) {
	case NVKM_SUBDEV_PMU:
		tail_reg = 0x4cc;
		break;
	case NVKM_ENGINE_SEC2:
		tail_reg = 0xa34;
		break;
	default:
		nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
			   nvkm_subdev_name[falcon->owner->index]);
		return -EINVAL;
	}

	/*
	 * Read the message - queues are not initialized yet so we cannot rely
	 * on msg_queue_read()
	 */
	tail = nvkm_falcon_rd32(falcon, tail_reg);
	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		return -ENOSPC;
	}

	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
			      (hdr + 1));

	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
	nvkm_falcon_wr32(falcon, tail_reg, tail);

	ret = priv->func->init_func->init_callback(priv, hdr);
	if (ret)
		return ret;

	return 0;
}
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
			   struct nvkm_msgqueue_queue *queue)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
	int ret;

	/* the first message we receive must be the init message */
	if (!priv->init_msg_received) {
		ret = msgqueue_handle_init_msg(priv, hdr);
		if (!ret)
			priv->init_msg_received = true;
	} else {
		while (msg_queue_read(priv, queue, hdr) > 0)
			msgqueue_msg_handle(priv, hdr);
	}
}
void
nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
{
	if (!queue || !queue->func || !queue->func->init_func)
		return;

	queue->func->init_func->gen_cmdline(queue, buf);
}
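/*
 * Ask the ACR unit of the managing firmware to boot the falcons set in
 * falcon_mask, either with a single multiple-falcons command when the
 * firmware supports it, or by booting each requested falcon individually.
 */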
int
nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
			       unsigned long falcon_mask)
{
	unsigned long falcon;

	if (!queue || !queue->func->acr_func)
		return -ENODEV;

	/* Does the firmware support booting multiple falcons? */
	if (queue->func->acr_func->boot_multiple_falcons)
		return queue->func->acr_func->boot_multiple_falcons(queue,
								    falcon_mask);

	/* Else boot all requested falcons individually */
	if (!queue->func->acr_func->boot_falcon)
		return -ENODEV;

	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		int ret = queue->func->acr_func->boot_falcon(queue, falcon);

		if (ret)
			return ret;
	}

	return 0;
}
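/*
 * Instantiate the msgqueue implementation matching the firmware version
 * reported by the falcon; each supported version has its own constructor.
 */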
int
nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
		  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
	const struct nvkm_subdev *subdev = falcon->owner;
	int ret;

	switch (version) {
	case 0x0137c63d:
		ret = msgqueue_0137c63d_new(falcon, sb, queue);
		break;
	case 0x0137bca5:
		ret = msgqueue_0137bca5_new(falcon, sb, queue);
		break;
	case 0x0148cdec:
		ret = msgqueue_0148cdec_new(falcon, sb, queue);
		break;
	default:
		nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
			   version);
		return -EINVAL;
	}

	if (ret)
		return ret;

	nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
	(*queue)->fw_version = version;

	return 0;
}
void
nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
{
	if (*queue) {
		(*queue)->func->dtor(*queue);
		*queue = NULL;
	}
}
void
nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
{
	if (!queue->func || !queue->func->recv) {
		const struct nvkm_subdev *subdev = queue->falcon->owner;

		nvkm_warn(subdev, "missing msgqueue recv function\n");
		return;
	}

	queue->func->recv(queue);
}
void
nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
{
	/* firmware not set yet... */
	if (!queue)
		return;

	queue->init_msg_received = false;
	reinit_completion(&queue->init_done);
}
void
nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
		   struct nvkm_falcon *falcon,
		   struct nvkm_msgqueue *queue)
{
	int i;

	queue->func = func;
	queue->falcon = falcon;
	mutex_init(&queue->seq_lock);
	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
		queue->seq[i].id = i;

	init_completion(&queue->init_done);
}