// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
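
/*
 * Usage sketch (illustrative only, not part of this file): a GCE client
 * driver would typically resolve its register description at probe time.
 * The surrounding probe context is hypothetical; only
 * cmdq_dev_get_client_reg() and struct cmdq_client_reg come from this
 * helper.
 *
 *	struct cmdq_client_reg client_reg;
 *	int ret;
 *
 *	ret = cmdq_dev_get_client_reg(dev, &client_reg, 0);
 *	if (ret)
 *		return ret;
 *	// client_reg.subsys, .offset and .size now hold the three cells of
 *	// the first "mediatek,gce-client-reg" phandle tuple in the DT node.
 */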
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return (struct cmdq_client *)-ENOMEM;

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
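
/*
 * Usage sketch (illustrative only, not part of this file): the mailbox
 * handle is usually created once in probe() and released in remove().
 * The channel index and timeout below are hypothetical; CMDQ_NO_TIMEOUT
 * can be passed instead of a millisecond value to disable the software
 * timeout timer.
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(dev, 0, 1000);	// channel 0, 1000 ms timeout
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *	// ... build and flush packets ...
 *	cmdq_mbox_destroy(cl);
 */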
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
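
/*
 * Usage sketch (illustrative only, not part of this file): command packets
 * are allocated against a previously created client; PAGE_SIZE below is an
 * arbitrary example size.
 *
 *	struct cmdq_pkt *pkt;
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 *	// ... append commands and flush ...
 *	cmdq_pkt_destroy(pkt);
 */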
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) still keeps
		 * growing, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet.  Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
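
/*
 * Illustrative recovery pattern implied by the comment above (not part of
 * this file): when appends start failing with -ENOMEM, pkt->cmd_buf_size
 * keeps tracking the real required size, so after attempting to append
 * every command the caller could rebuild the packet with that size.  The
 * variables below are hypothetical.
 *
 *	size_t needed = pkt->cmd_buf_size;	// real required size
 *
 *	cmdq_pkt_destroy(pkt);
 *	pkt = cmdq_pkt_create(cl, needed);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 *	// ... append the same commands again, then flush ...
 */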
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
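
/*
 * Usage sketch (illustrative only, not part of this file): the subsys value
 * would normally come from cmdq_dev_get_client_reg() above; the register
 * offset and values below are hypothetical.
 *
 *	#define MY_REG_EN	0x0c00		// hypothetical register offset
 *
 *	cmdq_pkt_write(pkt, client_reg.subsys, MY_REG_EN, 0x1);
 *	// masked write, intended to touch only the bits set in the mask
 *	cmdq_pkt_write_mask(pkt, client_reg.subsys, MY_REG_EN, 0x1, 0x1);
 */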
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
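
/*
 * Usage sketch (illustrative only, not part of this file): one possible
 * pattern is to clear an event token before waiting on it, so a stale
 * event does not satisfy the wait.  The event id below is hypothetical
 * and must be below CMDQ_MAX_EVENT.
 *
 *	#define MY_HW_EVENT_DONE	129	// hypothetical event id
 *
 *	cmdq_pkt_clear_event(pkt, MY_HW_EVENT_DONE);
 *	// ... commands that kick the hardware ...
 *	cmdq_pkt_wfe(pkt, MY_HW_EVENT_DONE);
 */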
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
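
/*
 * Usage sketch (illustrative only, not part of this file):
 * cmdq_pkt_poll_mask() emits a mask instruction followed by a POLL, so the
 * packet waits for the masked register value to match.  The offset, value
 * and mask below are hypothetical.
 *
 *	// wait until bit 0 of a (hypothetical) status register reads 1
 *	cmdq_pkt_poll_mask(pkt, client_reg.subsys, 0x0030, 0x1, 0x1);
 */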
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
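
/*
 * Usage sketch (illustrative only, not part of this file): the caller's
 * callback is invoked from the completion path after the packet has been
 * consumed, with data.data pointing at the caller-supplied pointer.  The
 * callback, struct my_ctx and its fields below are hypothetical.
 *
 *	static void my_pkt_done(struct cmdq_cb_data data)
 *	{
 *		struct my_ctx *ctx = data.data;	// hypothetical context
 *
 *		if (data.sta != CMDQ_CB_NORMAL)
 *			dev_warn(ctx->dev, "cmdq flush failed\n");
 *		cmdq_pkt_destroy(ctx->pkt);
 *	}
 *
 *	...
 *	err = cmdq_pkt_flush_async(pkt, my_pkt_done, ctx);
 */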
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};
static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
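
/*
 * End-to-end sketch (illustrative only, not part of this file), tying the
 * helpers together for a blocking submission.  The offset, value and size
 * below are hypothetical.
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 *	cmdq_pkt_write(pkt, client_reg.subsys, 0x0c00, 0x1);
 *	err = cmdq_pkt_flush(pkt);	// returns 0 or a negative errno
 *	cmdq_pkt_destroy(pkt);
 *	return err;
 */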

MODULE_LICENSE("GPL v2");