/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qmgr.h"
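/*
 * Queue access is bracketed by open/close: open takes the queue mutex and
 * caches the current tail pointer in msgq->position; close optionally
 * commits the updated position back to the tail register before dropping
 * the mutex. Reads between the two operate on the cached position only.
 */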
static void
nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
{
	mutex_lock(&msgq->mutex);
	msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
}
static void
nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);

	mutex_unlock(&msgq->mutex);
}
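/*
 * The queue is empty when the falcon's head register has caught up with
 * the tail register, i.e. the producer has written nothing that we have
 * not yet consumed.
 */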
static bool
nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
{
	u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
	u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
	return head == tail;
}
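/*
 * Pop raw bytes out of the queue's DMEM window. The queue is a ring
 * buffer: when the head register falls below our cached position, the
 * producer has wrapped, so reading restarts from msgq->offset (the base
 * of the queue). Consumed sizes are padded up to QUEUE_ALIGNMENT.
 */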
static int
nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, msgq->head_reg);
	/* has the buffer looped? */
	if (head < msgq->position)
		msgq->position = msgq->offset;

	tail = msgq->position;

	available = head - tail;
	if (size > available) {
		FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
			  size, available);
		return -EINVAL;
	}

	nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
	msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
	return 0;
}
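/*
 * Read one complete message: the fixed-size header first, then, if the
 * header advertises a larger total size, the payload that immediately
 * follows it in the buffer. Returns 1 if a message was read, 0 if the
 * queue was empty, negative on error.
 */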
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	int ret = 0;

	nvkm_falcon_msgq_open(msgq);

	if (nvkm_falcon_msgq_empty(msgq))
		goto close;

	ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
	if (ret) {
		FLCNQ_ERR(msgq, "failed to read message header");
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
		ret = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
		if (ret) {
			FLCNQ_ERR(msgq, "failed to read message data");
			goto close;
		}
	}

	ret = 1;
close:
	nvkm_falcon_msgq_close(msgq, (ret >= 0));
	return ret;
}
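/*
 * Dispatch a received message to the sequence waiting for it, keyed by
 * hdr->seq_id. Used sequences get their callback invoked; cancelled
 * ones are only cleaned up. Asynchronous sequences are released here,
 * synchronous ones complete &seq->done so the waiter can proceed.
 */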
static int
nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	struct nvkm_falcon_qmgr_seq *seq;

	seq = &msgq->qmgr->seq.id[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->result = seq->callback(seq->priv, hdr);
	}

	if (seq->async) {
		nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
		return 0;
	}

	complete_all(&seq->done);
	return 0;
}
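/*
 * Drain the queue: keep reading messages into a stack buffer and
 * dispatching them until nvkm_falcon_msgq_read() reports the queue
 * empty (or fails).
 */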
void
nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvfw_falcon_msg *hdr = (void *)msg_buffer;

	while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
		nvkm_falcon_msgq_exec(msgq, hdr);
}
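/*
 * Receive the firmware's init message. At this point the queue has not
 * been configured yet, so the head/tail registers and base offset are
 * taken straight from the falcon's function pointers before popping the
 * message, and the advertised header size is checked against what the
 * caller expected.
 */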
int
nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
			      void *data, u32 size)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
	struct nvfw_falcon_msg *hdr = data;
	int ret;

	msgq->head_reg = falcon->func->msgq.head;
	msgq->tail_reg = falcon->func->msgq.tail;
	msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);

	nvkm_falcon_msgq_open(msgq);
	ret = nvkm_falcon_msgq_pop(msgq, data, size);
	if (ret == 0 && hdr->size != size) {
		FLCN_ERR(falcon, "unexpected init message size %d vs %d",
			 hdr->size, size);
		ret = -EINVAL;
	}
	nvkm_falcon_msgq_close(msgq, ret == 0);
	return ret;
}
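/*
 * Configure the queue from the parameters carried by the init message:
 * per-index head/tail registers (spaced by the per-queue register
 * stride) and the DMEM offset of the queue's base.
 */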
void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
		      u32 index, u32 offset, u32 size)
{
	const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;

	msgq->head_reg = func->msgq.head + index * func->msgq.stride;
	msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
	msgq->offset = offset;

	FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
		  index, msgq->offset, size);
}
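/*
 * Constructor/destructor. nvkm_falcon_msgq_new() only allocates and
 * initialises the wrapper; the queue itself is not usable until
 * nvkm_falcon_msgq_init() has run with the geometry reported by the
 * firmware's init message.
 */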
void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq = *pmsgq;
	if (msgq) {
		kfree(*pmsgq);
		*pmsgq = NULL;
	}
}
int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
		     struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq = *pmsgq;

	if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
		return -ENOMEM;

	msgq->qmgr = qmgr;
	msgq->name = name;
	mutex_init(&msgq->mutex);
	return 0;
}
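/*
 * Typical lifecycle, as a rough sketch only (error handling elided; the
 * variable names and the order of the init-message exchange here are
 * illustrative, not taken from a specific caller):
 *
 *	struct nvkm_falcon_msgq *msgq;
 *
 *	ret = nvkm_falcon_msgq_new(qmgr, "msgq", &msgq);
 *	...
 *	nvkm_falcon_msgq_recv_initmsg(msgq, init, sizeof(init));
 *	nvkm_falcon_msgq_init(msgq, index, offset, size);
 *	...
 *	nvkm_falcon_msgq_recv(msgq);	(e.g. from an IRQ-driven worker)
 *	...
 *	nvkm_falcon_msgq_del(&msgq);
 */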