WIP FPC-III support
[linux/fpc-iii.git] / drivers / gpu / drm / nouveau / nvkm / falcon / msgq.c
blobe74371dffc76cc4a2142be9788bb1770a04aef97
1 /*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "qmgr.h"
25 static void
26 nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
28 mutex_lock(&msgq->mutex);
29 msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
32 static void
33 nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
35 struct nvkm_falcon *falcon = msgq->qmgr->falcon;
37 if (commit)
38 nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
40 mutex_unlock(&msgq->mutex);
43 static bool
44 nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
46 u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
47 u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
48 return head == tail;
51 static int
52 nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
54 struct nvkm_falcon *falcon = msgq->qmgr->falcon;
55 u32 head, tail, available;
57 head = nvkm_falcon_rd32(falcon, msgq->head_reg);
58 /* has the buffer looped? */
59 if (head < msgq->position)
60 msgq->position = msgq->offset;
62 tail = msgq->position;
64 available = head - tail;
65 if (size > available) {
66 FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
67 size, available);
68 return -EINVAL;
71 nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
72 msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
73 return 0;
/* Read one complete message (header plus optional payload) from the queue
 * into the caller-supplied buffer at *hdr.
 *
 * Returns 1 if a message was read, 0 if the queue was empty, or a negative
 * error code on failure.  The updated tail is only committed back to the
 * falcon when the result is >= 0, so a failed read leaves the queue state
 * untouched from the firmware's point of view.
 */
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	int ret = 0;

	nvkm_falcon_msgq_open(msgq);

	if (nvkm_falcon_msgq_empty(msgq))
		goto close;

	ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
	if (ret) {
		FLCNQ_ERR(msgq, "failed to read message header");
		goto close;
	}

	/* hdr->size is the total message size including the header; the
	 * caller's buffer is only MSG_BUF_SIZE bytes.
	 */
	if (hdr->size > MSG_BUF_SIZE) {
		FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
		ret = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		/* Payload lands immediately after the header (hdr + 1). */
		ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
		if (ret) {
			FLCNQ_ERR(msgq, "failed to read message data");
			goto close;
		}
	}

	ret = 1;
close:
	nvkm_falcon_msgq_close(msgq, (ret >= 0));
	return ret;
}
114 static int
115 nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
117 struct nvkm_falcon_qmgr_seq *seq;
119 seq = &msgq->qmgr->seq.id[hdr->seq_id];
120 if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
121 FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
122 return -EINVAL;
125 if (seq->state == SEQ_STATE_USED) {
126 if (seq->callback)
127 seq->result = seq->callback(seq->priv, hdr);
130 if (seq->async) {
131 nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
132 return 0;
135 complete_all(&seq->done);
136 return 0;
139 void
140 nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
143 * We are invoked from a worker thread, so normally we have plenty of
144 * stack space to work with.
146 u8 msg_buffer[MSG_BUF_SIZE];
147 struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
149 while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
150 nvkm_falcon_msgq_exec(msgq, hdr);
154 nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
155 void *data, u32 size)
157 struct nvkm_falcon *falcon = msgq->qmgr->falcon;
158 struct nvfw_falcon_msg *hdr = data;
159 int ret;
161 msgq->head_reg = falcon->func->msgq.head;
162 msgq->tail_reg = falcon->func->msgq.tail;
163 msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
165 nvkm_falcon_msgq_open(msgq);
166 ret = nvkm_falcon_msgq_pop(msgq, data, size);
167 if (ret == 0 && hdr->size != size) {
168 FLCN_ERR(falcon, "unexpected init message size %d vs %d",
169 hdr->size, size);
170 ret = -EINVAL;
172 nvkm_falcon_msgq_close(msgq, ret == 0);
173 return ret;
176 void
177 nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
178 u32 index, u32 offset, u32 size)
180 const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
182 msgq->head_reg = func->msgq.head + index * func->msgq.stride;
183 msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
184 msgq->offset = offset;
186 FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
187 index, msgq->offset, size);
190 void
191 nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
193 struct nvkm_falcon_msgq *msgq = *pmsgq;
194 if (msgq) {
195 kfree(*pmsgq);
196 *pmsgq = NULL;
201 nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
202 struct nvkm_falcon_msgq **pmsgq)
204 struct nvkm_falcon_msgq *msgq = *pmsgq;
206 if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
207 return -ENOMEM;
209 msgq->qmgr = qmgr;
210 msgq->name = name;
211 mutex_init(&msgq->mutex);
212 return 0;