// SPDX-License-Identifier: GPL-2.0+
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18	/* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)
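
/*
 * Note: TCR counts transfer *units*, not bytes. SH_DMA_TCR_MAX bounds the
 * byte length of a single descriptor; the value actually written to the TCR
 * register is scaled down by the per-channel transfer-size shift (see
 * dmae_set_reg()).
 */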
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
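
/* The only RCU reader is the NMI notifier below, which must not take locks. */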
/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
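
/*
 * The accessors above hide two per-SoC quirks: DMAOR may be 16- or 32-bit
 * wide (pdata->dmaor_is_32bit), and CHCR sits at a platform-dependent offset
 * within the channel register block (shdev->chcr_offset).
 */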
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
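
/*
 * The transfer-size (TS) field in CHCR may be split into a low and a high
 * part on some SoCs; calc_xmit_shift() recombines the two fragments into an
 * index into pdata->ts_shift[], which holds log2 of the unit size in bytes.
 */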
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
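
/*
 * log2size_to_chcr() is the inverse of calc_xmit_shift(): unsupported l2size
 * values fall back to index 0, i.e. the first entry of pdata->ts_shift[].
 */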
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
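
/*
 * The read-modify-write above updates only this channel's byte lane of the
 * 16-bit DMARS register: with dmars_bit == 0 the mask 0xff00 preserves the
 * high byte and the MID/RID value lands in the low byte; with dmars_bit == 8
 * the shifted mask 0x00ff preserves the low byte instead.
 */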
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}
/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}
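
/*
 * Note the asymmetry above: without DT the match value is a slave ID checked
 * against cfg->slave_id; with DT it is a MID/RID value, and the index of the
 * matching table entry is recorded as the channel's slave_id.
 */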
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
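
/*
 * hw.tcr is the requested length in bytes, while the live TCR register
 * counts remaining transfer units, so the hardware value is scaled back up
 * by xmit_shift before the subtraction in sh_dmae_get_partial().
 */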
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}
static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan)
		return -ENOMEM;

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

#ifdef CONFIG_PM
static int sh_dmae_runtime_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
			   NULL)
};
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation and may only be called
	 * after a successful slave configuration.
	 */
	return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};
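
/*
 * Everything above is only the hardware-specific layer: descriptor life
 * cycle, queue management and the generic dmaengine callbacks live in the
 * shared shdma-base library, which drives this driver through the ops table
 * registered here.
 */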
static int sh_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
	unsigned long irqflags = 0;
	int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_device_get_match_data(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 * A hypothetical resource layout is sketched below.
	 */
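	/*
	 * For example, a hypothetical board file (not from this driver) could
	 * describe case 2 as one error IRQ followed by a contiguous range of
	 * six channel IRQs; the addresses and event codes here are made up:
	 *
	 *	static struct resource dmae_resources[] = {
	 *		DEFINE_RES_MEM(0xfe008020, 0x270),	// channel regs
	 *		DEFINE_RES_IRQ(evt2irq(0xbc0)),		// error IRQ
	 *		DEFINE_RES_NAMED(evt2irq(0x800), 6,	// channels 0-5
	 *				 NULL, IORESOURCE_IRQ),
	 *	};
	 */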
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev)
		return -ENOMEM;

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}
	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;
	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);
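
	/*
	 * From this point on the device is visible to the NMI notifier via
	 * RCU list traversal, which is why the error paths below must
	 * list_del_rcu() and synchronize_rcu() before probe returns.
	 */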
	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
		chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

		if (!chanirq_res)
			chanirq_res = errirq_res;
		else
			irqres++;

		if (chanirq_res == errirq_res ||
		    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
			irqflags = IRQF_SHARED;

		errirq = errirq_res->start;

		err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
				       irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev,
				"DMA failed requesting irq #%d, error %d\n",
				errirq, err);
			goto eirq_err;
		}
	} else {
		chanirq_res = errirq_res;
	}

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}
static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);
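
	/*
	 * The NMI notifier may still be walking sh_dmae_devices; wait for
	 * any in-flight RCU readers before the device data is released.
	 */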
	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
	},
	.remove		= sh_dmae_remove,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);