// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_ARG_A_WRITE_MASK	0xffff
#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)

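/*
 * Editor's note, inferred from cmdq_pkt_append_command() below rather
 * than from hardware documentation: each CMDQ instruction appears to be
 * a single 64-bit word, with the op code (enum cmdq_code) shifted into
 * the top of the high half by CMDQ_OP_CODE_SHIFT, arg_a filling the
 * rest of the high half, and arg_b in the low 32 bits. CMDQ_EOC_CMD
 * above is exactly such a word: CMDQ_CODE_EOC as the op code and
 * CMDQ_EOC_IRQ_EN as arg_b.
 */
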
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

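/*
 * Illustrative sketch (not part of the original file; "my_dev" and the
 * channel index are hypothetical): a consumer driver would typically
 * create one client per mailbox channel listed in its "mboxes"
 * devicetree property, e.g.:
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(my_dev, 0, 2000);	// 2000 ms timeout
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */
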
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

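/*
 * Illustrative sizing sketch (assumption, not in the original file):
 * each appended instruction consumes CMDQ_INST_SIZE bytes, and
 * cmdq_pkt_finalize() appends two more instructions (EOC + JUMP), so a
 * packet meant to hold up to N commands could be created as:
 *
 *	pkt = cmdq_pkt_create(cl, (N + 2) * CMDQ_INST_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 */
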
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
				   u32 arg_a, u32 arg_b)
{
	u64 *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) still keeps
		 * increasing, so that the user knows how much memory is
		 * ultimately needed after appending all commands and flushing
		 * the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}
	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	(*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

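/*
 * Illustrative sketch of the retry pattern described in the comment
 * above (hypothetical helper "build_commands"): keep appending even
 * after -ENOMEM, then recreate the packet with the accumulated size.
 *
 *	if (build_commands(pkt) == -ENOMEM) {
 *		size_t real_size = pkt->cmd_buf_size;
 *
 *		cmdq_pkt_destroy(pkt);
 *		pkt = cmdq_pkt_create(cl, real_size + 2 * CMDQ_INST_SIZE);
 *	}
 *
 * The extra two instructions leave room for the EOC + JUMP that
 * cmdq_pkt_finalize() appends on flush.
 */
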
int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset)
{
	u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
		    (subsys << CMDQ_SUBSYS_SHIFT);

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
			u32 subsys, u32 offset, u32 mask)
{
	u32 offset_mask = offset;
	int err = 0;

	if (mask != 0xffffffff) {
		err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

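/*
 * Illustrative sketch (hypothetical register and values): update only
 * bits 7:0 of a register by emitting a MASK instruction followed by a
 * WRITE whose offset has CMDQ_WRITE_ENABLE_MASK set:
 *
 *	cmdq_pkt_write_mask(pkt, 0x5a, subsys, 0x40, 0xff);
 *
 * With mask == 0xffffffff this degenerates to a plain cmdq_pkt_write().
 */
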
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event)
{
	u32 arg_b;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	/*
	 * WFE arg_b
	 * bit 0-11: wait value
	 * bit 15: 1 - wait, 0 - no wait
	 * bit 16-27: update value
	 * bit 31: 1 - update, 0 - no update
	 */
	arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

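/*
 * Illustrative sketch (the event ID is hypothetical; real IDs come from
 * the GCE dt-binding headers): wait for a hardware event before the
 * subsequent commands execute. Since arg_b sets CMDQ_WFE_UPDATE, the
 * event is also cleared after the wait, so the next wfe on the same
 * event sees a fresh signal.
 *
 *	cmdq_pkt_wfe(pkt, my_sof_event);
 *	cmdq_pkt_write(pkt, value, subsys, offset);
 */
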
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event)
{
	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
				       CMDQ_WFE_UPDATE);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);

	/* JUMP to end */
	err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

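/*
 * Illustrative sketch (hypothetical callback and context struct): queue
 * the packet without blocking; the callback runs once the GCE hardware
 * has executed the packet's EOC instruction.
 *
 *	static void my_cb(struct cmdq_cb_data data)
 *	{
 *		struct my_ctx *ctx = data.data;
 *
 *		if (data.sta != CMDQ_CB_NORMAL)
 *			dev_err(ctx->dev, "cmdq flush failed\n");
 *		cmdq_pkt_destroy(ctx->pkt);
 *	}
 *
 *	err = cmdq_pkt_flush_async(pkt, my_cb, ctx);
 */
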
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	cmplt->err = (data.sta != CMDQ_CB_NORMAL);
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

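/*
 * Illustrative end-to-end sketch (hypothetical values): build a packet,
 * flush it synchronously, then free it. The flush finalizes the packet
 * (EOC + JUMP are appended), so flush each packet only once.
 *
 *	pkt = cmdq_pkt_create(cl, 16 * CMDQ_INST_SIZE);
 *	cmdq_pkt_write(pkt, value, subsys, offset);
 *	err = cmdq_pkt_flush(pkt);	// blocks until the GCE completes
 *	cmdq_pkt_destroy(pkt);
 */
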
MODULE_LICENSE("GPL v2");