Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / gpu / drm / nouveau / nvkm / falcon / cmdq.c
blob44cf6a8862e1e0e9e652cbd630624d4a907348d5
1 /*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "qmgr.h"
static bool
nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
{
	/* Check whether 'size' bytes of command data fit in the DMEM ring
	 * between the current head (our write position) and tail (the
	 * falcon's read position).  If the contiguous space at the end of
	 * the ring is too small, *rewind is set so the caller emits a
	 * REWIND command and wraps back to the start of the queue.
	 */
	u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
	u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
	u32 free;

	/* Commands are written in QUEUE_ALIGNMENT-sized units. */
	size = ALIGN(size, QUEUE_ALIGNMENT);

	if (head >= tail) {
		/* Writer is ahead of reader: contiguous free space runs from
		 * head to the end of the queue, minus room for the header of
		 * a possible REWIND command.
		 */
		free = cmdq->offset + cmdq->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			/* Not enough room at the end; plan to wrap. */
			*rewind = true;
			head = cmdq->offset;
		}
	}

	/* After a (possible) rewind, or when the writer trails the reader:
	 * free space is the gap up to tail, keeping one byte unused so a
	 * full queue is distinguishable from an empty one.
	 */
	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}
50 static void
51 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
53 struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
54 nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
55 cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
58 static void
59 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
61 struct nvfw_falcon_cmd cmd;
63 cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
64 cmd.size = sizeof(cmd);
65 nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
67 cmdq->position = cmdq->offset;
static int
nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
{
	/* Begin a command write of 'size' bytes.
	 *
	 * On success (0), returns with cmdq->mutex HELD and cmdq->position
	 * set to the current head; the caller must finish with
	 * nvkm_falcon_cmdq_close(), which publishes the head and unlocks.
	 * On -EAGAIN (queue full), the mutex is released before returning.
	 */
	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
	bool rewind = false;

	mutex_lock(&cmdq->mutex);

	if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
		FLCNQ_DBG(cmdq, "queue full");
		mutex_unlock(&cmdq->mutex);
		return -EAGAIN;
	}

	/* Start writing at the current hardware head pointer. */
	cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);

	/* Not enough contiguous space at the end of the ring: emit a
	 * REWIND command and wrap position back to the queue start.
	 */
	if (rewind)
		nvkm_falcon_cmdq_rewind(cmdq);

	return 0;
}
static void
nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
{
	/* Publish the new write position to the falcon by updating the head
	 * register, then drop the mutex taken by nvkm_falcon_cmdq_open().
	 */
	nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
	mutex_unlock(&cmdq->mutex);
}
static int
nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
{
	/* Write a fully-formed command into the queue, polling for up to
	 * two seconds for space to become available.
	 *
	 * NOTE(review): the retry loop busy-spins on -EAGAIN with no sleep
	 * between attempts — presumably acceptable because the falcon
	 * drains the queue quickly; confirm this is intentional.
	 */
	static unsigned timeout = 2000;
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
	int ret = -EAGAIN;

	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
		ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
	if (ret) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue space");
		return ret;
	}

	/* Queue opened successfully (mutex held): copy the command in and
	 * publish it; close() also releases the mutex.
	 */
	nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
	nvkm_falcon_cmdq_close(cmdq);
	return ret;
}
/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)

/* Send a command to the falcon and register a callback for its reply.
 *
 * cmd: command header+payload; seq_id and ctrl_flags are filled in here.
 * cb/priv: completion callback (invoked by the message-queue side) and
 *          its opaque argument.
 * timeout: jiffies to wait for the reply; 0 makes the send asynchronous
 *          (the caller does not wait, the callback still fires).
 *
 * Returns 0 or the reply's result code on success, a negative errno on
 * failure.
 */
int
nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
		      nvkm_falcon_qmgr_callback cb, void *priv,
		      unsigned long timeout)
{
	struct nvkm_falcon_qmgr_seq *seq;
	int ret;

	/* Wait (up to 1s) until the queue has been initialised; 'ready' is
	 * completed by nvkm_falcon_cmdq_init() and reset by _fini().
	 */
	if (!wait_for_completion_timeout(&cmdq->ready,
					 msecs_to_jiffies(1000))) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
		return -ETIMEDOUT;
	}

	/* Allocate a sequence slot so the reply can be matched to us. */
	seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->state = SEQ_STATE_USED;
	seq->async = !timeout;
	seq->callback = cb;
	seq->priv = priv;

	ret = nvkm_falcon_cmdq_write(cmdq, cmd);
	if (ret) {
		/* Command never reached the falcon: mark the sequence
		 * PENDING (presumably the state _seq_release() expects for
		 * an unsent slot — verify against qmgr) and free it.
		 */
		seq->state = SEQ_STATE_PENDING;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
		return ret;
	}

	if (!seq->async) {
		/* Synchronous send: wait for the reply handler to complete
		 * seq->done and fill in seq->result.
		 *
		 * NOTE(review): on timeout the seq slot is NOT released
		 * here — it looks like it stays reserved in case a late
		 * reply arrives; confirm the qmgr reclaims it.
		 */
		if (!wait_for_completion_timeout(&seq->done, timeout)) {
			FLCNQ_ERR(cmdq, "timeout waiting for reply");
			return -ETIMEDOUT;
		}
		ret = seq->result;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
	}

	return ret;
}
void
nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
{
	/* Reset 'ready' so subsequent nvkm_falcon_cmdq_send() calls block
	 * until nvkm_falcon_cmdq_init() completes it again.
	 */
	reinit_completion(&cmdq->ready);
}
174 void
175 nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
176 u32 index, u32 offset, u32 size)
178 const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
180 cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
181 cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
182 cmdq->offset = offset;
183 cmdq->size = size;
184 complete_all(&cmdq->ready);
186 FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
187 index, cmdq->offset, cmdq->size);
190 void
191 nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
193 struct nvkm_falcon_cmdq *cmdq = *pcmdq;
194 if (cmdq) {
195 kfree(*pcmdq);
196 *pcmdq = NULL;
201 nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
202 struct nvkm_falcon_cmdq **pcmdq)
204 struct nvkm_falcon_cmdq *cmdq = *pcmdq;
206 if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
207 return -ENOMEM;
209 cmdq->qmgr = qmgr;
210 cmdq->name = name;
211 mutex_init(&cmdq->mutex);
212 init_completion(&cmdq->ready);
213 return 0;