// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 * Copyright (c) 2017 Linaro Ltd.
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry. Unsupported by PL080S.
 */
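
/*
 * Worked example of the FIFO guideline above (illustrative only): for a
 * memory-to-peripheral transfer into a peripheral with a 16-word FIFO,
 * program a destination burst size of 16 (a full burst must fit in the
 * FIFO) and a source burst size of 8, per the half-depth rule.
 */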
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>
#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"
#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: offset to the configuration register
 * @channels: the number of channels available in this variant
 * @signals: the number of request signals available from the hardware
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether this variant is a ST Microelectronics Nomadik, where the
 *	channels have Nomadik security extension bits that need to be checked
 *	for permission before use and some registers are missing
 * @pl080s: whether this variant is a Samsung PL080S, which has separate
 *	register and LLI word for transfer size.
 * @ftdmac020: whether this variant is a Faraday Technology FTDMAC020
 * @max_transfer_size: the maximum single element transfer size for this
 *	PL08x variant
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	u8 signals;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	bool ftdmac020;
	u32 max_transfer_size;
};
/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @reg_config: configuration address for this physical channel
 * @reg_control: control address for this physical channel
 * @reg_src: transfer source address register
 * @reg_dst: transfer destination address register
 * @reg_lli: transfer LLI address register
 * @reg_busy: if the variant has a special per-channel busy register,
 *	this contains a pointer to it
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 *	channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 *	world
 * @ftdmac020: channel is on a FTDMAC020
 * @pl080s: channel is on a PL080S
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	void __iomem *reg_control;
	void __iomem *reg_src;
	void __iomem *reg_dst;
	void __iomem *reg_lli;
	void __iomem *reg_busy;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
	bool ftdmac020;
	bool pl080s;
};
/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};
/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd. Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};
/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};
/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 * @waiting_at: time in jiffies when this channel moved to waiting state
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned int mux_use;
	unsigned long waiting_at;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: optional slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @has_slave: the PL08x has a slave engine (routed signals)
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	bool has_slave;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};
/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8
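
/*
 * Illustrative arithmetic for the defines above: one transfer buffer
 * holds up to MAX_NUM_TSFR_LLIS LLIs of PL080_LLI_WORDS u32 words each,
 * i.e. 512 * 4 * 4 = 8 KiB on a PL080, while a PL080S buffer needs
 * 512 * 8 * 4 = 16 KiB because of the extra CCTL2 word and the 8-word
 * LLI alignment.
 */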
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}
/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX. One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}
static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}
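
/*
 * Note on the mux refcount above: request/release must stay balanced,
 * since the request signal is only handed back to the platform when
 * mux_use drops to zero. Several descriptors on the same virtual
 * channel can therefore share a single signal setting.
 */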
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	/* If we have a special busy register, take a shortcut */
	if (ch->reg_busy) {
		val = readl(ch->reg_busy);
		return !!(val & BIT(ch->id));
	}
	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}
/*
 * pl08x_write_lli() - Write an LLI into the DMA controller.
 *
 * The PL08x derivatives support linked lists, but the first item of the
 * list containing the source, destination, control word and next LLI is
 * ignored. Instead the driver has to write those values directly into the
 * SRC, DST, LLI and control registers. On FTDMAC020 also the SIZE
 * register needs to be set up for the first transfer.
 */
static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src);
	writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli);
	/*
	 * The FTDMAC020 has a different layout in the CCTL word of the LLI
	 * and the CCTL register which is split in CSR and SIZE registers.
	 * Convert the LLI item CCTL into the proper values to write into
	 * the CSR and SIZE registers.
	 */
	if (phychan->ftdmac020) {
		u32 llictl = lli[PL080_LLI_CCTL];
		u32 val = 0;

		/* Write the transfer size (12 bits) to the size register */
		writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK,
			       phychan->base + FTDMAC020_CH_SIZE);
		/*
		 * Then write the control bits 28..16 to the control register
		 * by shuffling the bits around to where they are in the
		 * main register. The mapping is as follows:
		 * Bit 28: TC_MSK - mask on all except last LLI
		 * Bit 27..25: SRC_WIDTH
		 * Bit 24..22: DST_WIDTH
		 * Bit 21..20: SRCAD_CTRL
		 * Bit 19..17: DSTAD_CTRL
		 */
		if (llictl & FTDMAC020_LLI_TC_MSK)
			val |= FTDMAC020_CH_CSR_TC_MSK;
		val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
			(FTDMAC020_LLI_SRC_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
			(FTDMAC020_LLI_DST_WIDTH_SHIFT -
			 FTDMAC020_CH_CSR_DST_WIDTH_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >>
			(FTDMAC020_LLI_SRCAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT));
		val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >>
			(FTDMAC020_LLI_DSTAD_CTL_SHIFT -
			 FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT));
		if (llictl & FTDMAC020_LLI_SRC_SEL)
			val |= FTDMAC020_CH_CSR_SRC_SEL;
		if (llictl & FTDMAC020_LLI_DST_SEL)
			val |= FTDMAC020_CH_CSR_DST_SEL;
		/*
		 * Set up the bits that exist in the CSR but are not
		 * part of the LLI, i.e. only gets written to the control
		 * register right here.
		 *
		 * FIXME: do not just handle memcpy, also handle slave DMA.
		 */
		switch (pl08x->pd->memcpy_burst_size) {
		default:
		case PL08X_BURST_SZ_1:
			val |= PL080_BSIZE_1 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_4:
			val |= PL080_BSIZE_4 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_8:
			val |= PL080_BSIZE_8 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_16:
			val |= PL080_BSIZE_16 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_32:
			val |= PL080_BSIZE_32 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_64:
			val |= PL080_BSIZE_64 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_128:
			val |= PL080_BSIZE_128 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		case PL08X_BURST_SZ_256:
			val |= PL080_BSIZE_256 <<
				FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
			break;
		}
		/* Protection flags */
		if (pl08x->pd->memcpy_prot_buff)
			val |= FTDMAC020_CH_CSR_PROT2;
		if (pl08x->pd->memcpy_prot_cache)
			val |= FTDMAC020_CH_CSR_PROT3;
		/* We are the kernel, so we are in privileged mode */
		val |= FTDMAC020_CH_CSR_PROT1;

		writel_relaxed(val, phychan->reg_control);
	} else {
		/* Bits are just identical */
		writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control);
	}
	/* Second control word on the PL080s */
	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}
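
/*
 * Illustration of the bit shuffle above (assuming the shift values from
 * <linux/amba/pl080.h>): each field moves by a single right shift of
 * (LLI shift - CSR shift). If SRC_WIDTH sits at LLI bits 27..25 and CSR
 * bits 13..11, the field is shifted right by 25 - 11 = 14 bit
 * positions; no extra masking is needed because the field mask was
 * applied before the shift.
 */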
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed. Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;
	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	if (phychan->ftdmac020) {
		val = readl(phychan->reg_config);
		while (val & FTDMAC020_CH_CFG_BUSY)
			val = readl(phychan->reg_config);

		val = readl(phychan->reg_control);
		while (val & FTDMAC020_CH_CSR_EN)
			val = readl(phychan->reg_control);

		writel(val | FTDMAC020_CH_CSR_EN,
		       phychan->reg_control);
	} else {
		val = readl(phychan->reg_config);
		while ((val & PL080_CONFIG_ACTIVE) ||
		       (val & PL080_CONFIG_ENABLE))
			val = readl(phychan->reg_config);

		writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
	}
}
/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	if (ch->ftdmac020) {
		/* Use the enable bit on the FTDMAC020 */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->reg_config);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->reg_config);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Use the enable bit on the FTDMAC020 */
	if (ch->ftdmac020) {
		val = readl(ch->reg_control);
		val |= FTDMAC020_CH_CSR_EN;
		writel(val, ch->reg_control);
		return;
	}

	/* Clear the HALT bit */
	val = readl(ch->reg_config);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->reg_config);
}
/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status. This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
				     struct pl08x_phy_chan *ch)
{
	u32 val;

	/* The layout for the FTDMAC020 is different */
	if (ch->ftdmac020) {
		/* Disable all interrupts */
		val = readl(ch->reg_config);
		val |= (FTDMAC020_CH_CFG_INT_ABT_MASK |
			FTDMAC020_CH_CFG_INT_ERR_MASK |
			FTDMAC020_CH_CFG_INT_TC_MASK);
		writel(val, ch->reg_config);

		/* Abort and disable channel */
		val = readl(ch->reg_control);
		val &= ~FTDMAC020_CH_CSR_EN;
		val |= FTDMAC020_CH_CSR_ABT;
		writel(val, ch->reg_control);

		/* Clear ABT and ERR interrupt flags */
		writel(BIT(ch->id) | BIT(ch->id + 16),
		       pl08x->base + PL080_ERR_CLEAR);
		writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);

		return;
	}

	val = readl(ch->reg_config);
	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);
	writel(val, ch->reg_config);

	writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}
static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		bytes = readl(ch->base + FTDMAC020_CH_SIZE);

		val = readl(ch->reg_control);
		val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK;
		val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		val = readl(ch->base + PL080S_CH_CONTROL2);
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = readl(ch->reg_control);
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x */
		val = readl(ch->reg_control);
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va)
{
	u32 val;
	u32 bytes;

	if (ch->ftdmac020) {
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= FTDMAC020_LLI_SRC_WIDTH_MSK;
		val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT;
	} else if (ch->pl080s) {
		val = llis_va[PL080S_LLI_CCTL2];
		bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;

		val = llis_va[PL080_LLI_CCTL];
		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	} else {
		/* Plain PL08x */
		val = llis_va[PL080_LLI_CCTL];
		bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;

		val &= PL080_CONTROL_SWIDTH_MASK;
		val >>= PL080_CONTROL_SWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
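
/*
 * In both helpers above the hardware counts transfers in units of the
 * source bus width, not bytes: e.g. a transfer size of 256 at 32-bit
 * width corresponds to 1024 remaining bytes, hence the multiplication
 * by the decoded width.
 */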
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	ch = plchan->phychan;
	txd = plchan->at;
	bytes = 0;

	if (!ch || !txd)
		return 0;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	bytes = get_bytes_in_phy_channel(ch);

	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	BUG_ON(clli < llis_bus || clli >= llis_bus +
		sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI - as this is an array,
	 * it's simple maths to find.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		bytes += get_bytes_in_lli(ch, llis_va);

		/*
		 * A LLI pointer going backward terminates the LLI list
		 */
		if (llis_va[PL080_LLI_LLI] <= clli)
			break;
	}

	return bytes;
}
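
/*
 * Residue sketch: the remaining byte count is the bytes still pending
 * in the physical channel plus the bytes of every LLI that has not been
 * loaded yet, which is why the walk above follows the LLI array from
 * the currently loaded entry to the end of the list.
 */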
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}
/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting_at = jiffies;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}
static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
				     struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}
/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;
	unsigned long waiting_at;
retry:
	next = NULL;
	waiting_at = jiffies;

	/*
	 * Find a waiting virtual channel for the next transfer.
	 * To be fair, time when each channel reached waiting state is compared
	 * to select channel that is waiting for the longest time.
	 */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING &&
		    p->waiting_at <= waiting_at) {
			next = p;
			waiting_at = p->waiting_at;
		}

	if (!next && pl08x->has_slave) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING &&
			    p->waiting_at <= waiting_at) {
				next = p;
				waiting_at = p->waiting_at;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}
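
/*
 * Scheduling note for the search above: the oldest waiter wins. The
 * "<=" comparison against a waiting_at that only ever decreases picks
 * the channel with the smallest (earliest) timestamp, so a channel
 * cannot be starved by later arrivals.
 */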
/*
 * LLI handling
 */

static inline unsigned int
pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x,
			u32 cctl,
			bool source)
{
	u32 val;

	if (pl08x->vd->ftdmac020) {
		if (source)
			val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
		else
			val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
	} else {
		if (source)
			val = (cctl & PL080_CONTROL_SWIDTH_MASK) >>
				PL080_CONTROL_SWIDTH_SHIFT;
		else
			val = (cctl & PL080_CONTROL_DWIDTH_MASK) >>
				PL080_CONTROL_DWIDTH_SHIFT;
	}

	switch (val) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	return 0;
}
static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x,
					 u32 cctl,
					 u8 srcwidth, u8 dstwidth,
					 size_t tsize)
{
	u32 retbits = cctl;

	/*
	 * Remove all src, dst and transfer size bits, then set the
	 * width and size according to the parameters. The bit offsets
	 * are different in the FTDMAC020 so we need to account for this.
	 */
	if (pl08x->vd->ftdmac020) {
		retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK;
		retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_SRC_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				FTDMAC020_LLI_DST_WIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK;
		retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT;
	} else {
		retbits &= ~PL080_CONTROL_DWIDTH_MASK;
		retbits &= ~PL080_CONTROL_SWIDTH_MASK;
		retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

		switch (srcwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_SWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		switch (dstwidth) {
		case 1:
			retbits |= PL080_WIDTH_8BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 2:
			retbits |= PL080_WIDTH_16BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		case 4:
			retbits |= PL080_WIDTH_32BIT <<
				PL080_CONTROL_DWIDTH_SHIFT;
			break;
		default:
			BUG();
			break;
		}

		tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
		retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	}

	return retbits;
}
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};
/*
 * Autoselect a master bus to use for the transfer. The slave bus will be
 * chosen as the victim in case src & dst are not similarly aligned, i.e.
 * if, after aligning the master's address with the width requirements of
 * the transfer (by sending a few bytes byte by byte), the slave is still
 * not aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    struct pl08x_bus_data **mbus,
				    struct pl08x_bus_data **sbus,
				    u32 cctl)
{
	bool dst_incr;
	bool src_incr;

	/*
	 * The FTDMAC020 only supports memory-to-memory transfer, so
	 * source and destination always increase.
	 */
	if (pl08x->vd->ftdmac020) {
		dst_incr = true;
		src_incr = true;
	} else {
		dst_incr = !!(cctl & PL080_CONTROL_DST_INCR);
		src_incr = !!(cctl & PL080_CONTROL_SRC_INCR);
	}

	/*
	 * If either bus is not advancing, i.e. it is a peripheral, that
	 * one becomes master
	 */
	if (!dst_incr) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!src_incr) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}
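
/*
 * Decision summary for the selection above: a non-advancing
 * (peripheral) bus always becomes the master; for memory-to-memory the
 * wider bus wins, with the destination preferred on a tie.
 */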
/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;
	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (pl08x->vd->ftdmac020) {
		/* FIXME: only memcpy so far so both increase */
		bd->srcbus.addr += len;
		bd->dstbus.addr += len;
	} else {
		if (cctl & PL080_CONTROL_SRC_INCR)
			bd->srcbus.addr += len;
		if (cctl & PL080_CONTROL_DST_INCR)
			bd->dstbus.addr += len;
	}

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
			int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
	(*total_bytes) += len;
}
#ifdef VERBOSE_DEBUG
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;
	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false);
	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");
		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   memory address and zero length. We pass this to controller
		 *   and after the transfer it will receive the last burst
		 *   request from peripheral and so transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, obviously as the DMA controller doesn't know when a
		 *   lli's transfer gets over, it can't load next lli. So in
		 *   this case, there has to be an assumption that only one lli
		 *   is supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc;

			/* FTDMAC020 only does memory-to-memory */
			if (pl08x->vd->ftdmac020)
				fc = PL080_FLOW_MEM2MEM;
			else
				fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
					PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}
		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}
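
		/*
		 * Worked example (illustrative): with a 4-byte wide master
		 * bus and mbus->addr == 0x1002, early_bytes becomes
		 * 4 - (0x1002 & 0x3) = 2, so one byte-width LLI of length 2
		 * brings the master bus up to alignment before the wide
		 * transfers begin.
		 */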
		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);
			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_lli_control_bits(pl08x, cctl,
					bd.srcbus.buswidth, bd.dstbus.buswidth,
					tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}
			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}
		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}
	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;

	if (txd->cyclic) {
		/* Link back to the first LLI. */
		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
	} else {
		/* The final LLI terminates the LLI. */
		last_lli[PL080_LLI_LLI] = 0;
		/* The final LLI element shall also fire an interrupt. */
		if (pl08x->vd->ftdmac020)
			last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK;
		else
			last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}
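
/*
 * Note on the termination above: a cyclic descriptor links its last LLI
 * back to the first so the ring never ends, while a one-shot descriptor
 * zeroes the next-LLI pointer and unmasks/enables TC on the last
 * element so completion raises exactly one interrupt.
 */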
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}
*vd
)
1510 struct pl08x_txd
*txd
= to_pl08x_txd(&vd
->tx
);
1511 struct pl08x_dma_chan
*plchan
= to_pl08x_chan(vd
->tx
.chan
);
1513 dma_descriptor_unmap(&vd
->tx
);
1515 pl08x_release_mux(plchan
);
1517 pl08x_free_txd(plchan
->host
, txd
);
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}
/*
 * The DMA ENGINE API
 */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}
1605 struct burst_table
{
1610 static const struct burst_table burst_sizes
[] = {
1613 .reg
= PL080_BSIZE_256
,
1617 .reg
= PL080_BSIZE_128
,
1621 .reg
= PL080_BSIZE_64
,
1625 .reg
= PL080_BSIZE_32
,
1629 .reg
= PL080_BSIZE_16
,
1633 .reg
= PL080_BSIZE_8
,
1637 .reg
= PL080_BSIZE_4
,
1641 .reg
= PL080_BSIZE_1
,
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port. We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst)
{
	u32 cctl = 0;
	u32 dst_ahb2;
	u32 src_ahb2;

	/* The FTDMAC020 uses different bits to indicate src/dst bus */
	if (ftdmac020) {
		dst_ahb2 = FTDMAC020_LLI_DST_SEL;
		src_ahb2 = FTDMAC020_LLI_SRC_SEL;
	} else {
		dst_ahb2 = PL080_CONTROL_DST_AHB2;
		src_ahb2 = PL080_CONTROL_SRC_AHB2;
	}

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= dst_ahb2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= src_ahb2;

	return cctl;
}
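
/*
 * Example (illustrative): with src allowed on AHB1|AHB2 and dst allowed
 * on AHB2 only, the first test routes the destination to AHB2 and the
 * second leaves the source on AHB1, putting the two ends of the
 * transfer on separate masters.
 */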
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}
static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}
static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
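
/*
 * Lookup example (illustrative): maxburst = 20 walks the descending
 * table until 16 <= 20, so the channel is programmed with
 * PL080_BSIZE_16; a requested burst size is never rounded up.
 */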
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
			  enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element. Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd)
		INIT_LIST_HEAD(&txd->dsg_list);
	return txd;
}
static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
{
	u32 cctl = 0;

	switch (pl08x->pd->memcpy_burst_size) {
	default:
		dev_err(&pl08x->adev->dev,
			"illegal burst size for memcpy, set to 1\n");
		fallthrough;
	case PL08X_BURST_SZ_1:
		cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_4:
		cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_8:
		cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_16:
		cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_32:
		cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_64:
		cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_128:
		cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case PL08X_BURST_SZ_256:
		cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	switch (pl08x->pd->memcpy_bus_width) {
	default:
		dev_err(&pl08x->adev->dev,
			"illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case PL08X_BUS_WIDTH_8_BITS:
		cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_16_BITS:
		cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case PL08X_BUS_WIDTH_32_BITS:
		cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* Protection flags */
	if (pl08x->pd->memcpy_prot_buff)
		cctl |= PL080_CONTROL_PROT_BUFF;
	if (pl08x->pd->memcpy_prot_cache)
		cctl |= PL080_CONTROL_PROT_CACHE;

	/* We are the kernel, so we are in privileged mode */
	cctl |= PL080_CONTROL_PROT_SYS;

	/* Both to be incremented or the code will break */
	cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		cctl |= pl08x_select_bus(false,
					 pl08x->mem_buses,
					 pl08x->mem_buses);

	return cctl;
}
pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data
*pl08x
)
1849 switch (pl08x
->pd
->memcpy_bus_width
) {
1851 dev_err(&pl08x
->adev
->dev
,
1852 "illegal bus width for memcpy, set to 8 bits\n");
1854 case PL08X_BUS_WIDTH_8_BITS
:
1855 cctl
|= PL080_WIDTH_8BIT
<< FTDMAC020_LLI_SRC_WIDTH_SHIFT
|
1856 PL080_WIDTH_8BIT
<< FTDMAC020_LLI_DST_WIDTH_SHIFT
;
1858 case PL08X_BUS_WIDTH_16_BITS
:
1859 cctl
|= PL080_WIDTH_16BIT
<< FTDMAC020_LLI_SRC_WIDTH_SHIFT
|
1860 PL080_WIDTH_16BIT
<< FTDMAC020_LLI_DST_WIDTH_SHIFT
;
1862 case PL08X_BUS_WIDTH_32_BITS
:
1863 cctl
|= PL080_WIDTH_32BIT
<< FTDMAC020_LLI_SRC_WIDTH_SHIFT
|
1864 PL080_WIDTH_32BIT
<< FTDMAC020_LLI_DST_WIDTH_SHIFT
;
1869 * By default mask the TC IRQ on all LLIs, it will be unmasked on
1870 * the last LLI item by other code.
1872 cctl
|= FTDMAC020_LLI_TC_MSK
;
1875 * Both to be incremented so leave bits FTDMAC020_LLI_SRCAD_CTL
1876 * and FTDMAC020_LLI_DSTAD_CTL as zero
1878 if (pl08x
->vd
->dualmaster
)
1879 cctl
|= pl08x_select_bus(true,
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;
	if (pl08x->vd->ftdmac020) {
		/* Writing CCFG zero ENABLES all interrupts */
		txd->ccfg = 0;
		txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x);
	} else {
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			PL080_CONFIG_TC_IRQ_MASK |
			PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = pl08x_memcpy_cctl(pl08x);
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct pl08x_txd *pl08x_init_txd(
		struct dma_chan *chan,
		enum dma_transfer_direction direction,
		dma_addr_t *slave_addr)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	enum dma_slave_buswidth addr_width;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		*slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		*slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
		PL080_CONFIG_TC_IRQ_MASK |
		tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	return txd;
}
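
/*
 * Flow control selection above in brief: when the client sets device_fc
 * the peripheral is made flow controller (MEM2PER_PER / PER2MEM_PER),
 * otherwise the DMAC controls the flow; the request signal is then
 * routed to the destination side for MEM_TO_DEV and the source side for
 * DEV_TO_MEM.
 */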
static int pl08x_tx_add_sg(struct pl08x_txd *txd,
			   enum dma_transfer_direction direction,
			   dma_addr_t slave_addr,
			   dma_addr_t buf_addr,
			   unsigned int len)
{
	struct pl08x_sg *dsg;

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg)
		return -ENOMEM;

	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->len = len;
	if (direction == DMA_MEM_TO_DEV) {
		dsg->src_addr = buf_addr;
		dsg->dst_addr = slave_addr;
	} else {
		dsg->src_addr = slave_addr;
		dsg->dst_addr = buf_addr;
	}

	return 0;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct scatterlist *sg;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, tmp) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      sg_dma_address(sg),
				      sg_dma_len(sg));
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev,
		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
		__func__, period_len, buf_len,
		direction == DMA_MEM_TO_DEV ? "to" : "from",
		plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	txd->cyclic = true;
	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
	for (tmp = 0; tmp < buf_len; tmp += period_len) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      buf_addr + tmp, period_len);
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}
static int pl08x_terminate_all(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	plchan->state = PL08X_CHAN_IDLE;

	if (plchan->phychan) {
		/*
		 * Mark physical channel as free and free any slave
		 * signal
		 */
		pl08x_phy_free(plchan);
	}
	/* Dequeue jobs and free LLIs */
	if (plchan->at) {
		vchan_terminate_vdesc(&plchan->at->vd);
		plchan->at = NULL;
	}
	/* Dequeue jobs not yet fired as well */
	pl08x_free_txd_list(pl08x, plchan);

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static void pl08x_synchronize(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	vchan_synchronize(&plchan->vc);
}
static int pl08x_pause(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_pause_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
static int pl08x_resume(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_resume_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_RUNNING;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
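
/*
 * Example (not compiled): a board file or client driver can use the
 * exported filter with dma_request_channel() to grab a channel by name.
 * The channel name "ssp0_tx" is a hypothetical placeholder.
 */
#if 0
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, pl08x_filter_id, "ssp0_tx");
	if (!chan)
		/* fall back or defer probing */;
#endif
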
static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	return plchan->cd == chan_id;
}
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	/* The FTDMAC020 variant does this in another register */
	if (pl08x->vd->ftdmac020) {
		writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR);
		return;
	}
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((BIT(i) & err) || (BIT(i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx && tx->cyclic) {
				vchan_cyclic_callback(&tx->vd);
			} else if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= BIT(i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			/*
			 * Some implementations have muxed signals, whereas some
			 * use a mux in front of the signals and need dynamic
			 * assignment of signals.
			 */
			chan->signal = -1;
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL);
			if (!chan->cd) {
				kfree(chan);
				return -ENOMEM;
			}
			chan->cd->bus_id = "memcpy";
			chan->cd->periph_buses = pl08x->pd->mem_buses;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan->cd);
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}
static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	if (pl08x->has_slave) {
		seq_printf(s, "\nPL08x virtual slave channels:\n");
		seq_printf(s, "CHANNEL:\tSTATE:\n");
		seq_printf(s, "--------\t------\n");
		list_for_each_entry(chan, &pl08x->slave.channels,
				    vc.chan.device_node) {
			seq_printf(s, "%s\t\t%s\n", chan->name,
				   pl08x_state_str(chan->state));
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			    NULL, pl08x, &pl08x_debugfs_fops);
}
#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
#ifdef CONFIG_OF
static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
					   u32 id)
{
	struct pl08x_dma_chan *chan;

	/* Trying to get a slave channel from something with no slave support */
	if (!pl08x->has_slave)
		return NULL;

	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		if (chan->signal == id)
			return &chan->vc.chan;
	}

	return NULL;
}
static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
	struct dma_chan *dma_chan;
	struct pl08x_dma_chan *plchan;

	if (!pl08x)
		return NULL;

	if (dma_spec->args_count != 2) {
		dev_err(&pl08x->adev->dev,
			"DMA channel translation requires two cells\n");
		return NULL;
	}

	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
	if (!dma_chan) {
		dev_err(&pl08x->adev->dev,
			"DMA slave channel not found\n");
		return NULL;
	}

	plchan = to_pl08x_chan(dma_chan);
	dev_dbg(&pl08x->adev->dev,
		"translated channel for signal %d\n",
		dma_spec->args[0]);

	/* Augment channel data for applicable AHB buses */
	plchan->cd->periph_buses = dma_spec->args[1];
	return dma_get_slave_channel(dma_chan);
}
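
/*
 * Example (not compiled): with the two-cell translation above, a consumer
 * node in the device tree names a channel by signal number plus an AHB
 * master bitmask. Node and property values here are an illustrative
 * sketch, not taken from this file:
 *
 *	uart0: serial@10009000 {
 *		...
 *		dmas = <&dmac 12 1>, <&dmac 13 1>;
 *		dma-names = "rx", "tx";
 *	};
 */
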
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		fallthrough;
	case 1:
		pd->memcpy_burst_size = PL08X_BURST_SZ_1;
		break;
	case 4:
		pd->memcpy_burst_size = PL08X_BURST_SZ_4;
		break;
	case 8:
		pd->memcpy_burst_size = PL08X_BURST_SZ_8;
		break;
	case 16:
		pd->memcpy_burst_size = PL08X_BURST_SZ_16;
		break;
	case 32:
		pd->memcpy_burst_size = PL08X_BURST_SZ_32;
		break;
	case 64:
		pd->memcpy_burst_size = PL08X_BURST_SZ_64;
		break;
	case 128:
		pd->memcpy_burst_size = PL08X_BURST_SZ_128;
		break;
	case 256:
		pd->memcpy_burst_size = PL08X_BURST_SZ_256;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		fallthrough;
	case 8:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
		break;
	case 16:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS;
		break;
	case 32:
		pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS;
		break;
	}

	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal), channels will then be allocated
	 * for a device and have its AHB interfaces set up at
	 * translation time.
	 */
	if (pl08x->vd->signals) {
		chanp = devm_kcalloc(&adev->dev,
				     pl08x->vd->signals,
				     sizeof(struct pl08x_channel_data),
				     GFP_KERNEL);
		if (!chanp)
			return -ENOMEM;

		pd->slave_channels = chanp;
		for (i = 0; i < pl08x->vd->signals; i++) {
			/*
			 * chanp->periph_buses will be assigned at translation
			 */
			chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
			chanp++;
		}
		pd->num_slave_channels = pl08x->vd->signals;
	}

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
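
/*
 * Example (not compiled): a controller node exercising the properties
 * parsed above. Values are an illustrative sketch, not taken from a real
 * board file:
 *
 *	dmac: dma-controller@10130000 {
 *		compatible = "arm,pl080", "arm,primecell";
 *		reg = <0x10130000 0x1000>;
 *		interrupts = <15>;
 *		#dma-cells = <2>;
 *		lli-bus-interface-ahb1;
 *		mem-bus-interface-ahb2;
 *		memcpy-burst-size = <256>;
 *		memcpy-bus-width = <32>;
 *	};
 */
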
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	if (vd->ftdmac020) {
		u32 val;

		val = readl(pl08x->base + FTDMAC020_REVISION);
		dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n",
			 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
		val = readl(pl08x->base + FTDMAC020_FEATURE);
		dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, "
			 "%s built-in bridge, %s, %s linked lists\n",
			 (val >> 12) & 0x0f,
			 (val & BIT(10)) ? "no" : "has",
			 (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0",
			 (val & BIT(8)) ? "supports" : "does not support");

		/* Vendor data from feature register */
		if (!(val & BIT(8)))
			dev_warn(&pl08x->adev->dev,
				 "linked lists not supported, required\n");
		vd->channels = (val >> 12) & 0x0f;
		vd->dualmaster = !!(val & BIT(9));
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.device_synchronize = pl08x_synchronize;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	if (vd->ftdmac020)
		pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;

	/*
	 * Initialize slave engine, if the block has no signals, that means
	 * we have no slave support.
	 */
	if (vd->signals) {
		pl08x->has_slave = true;
		dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
		dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
		pl08x->slave.dev = &adev->dev;
		pl08x->slave.device_free_chan_resources =
			pl08x_free_chan_resources;
		pl08x->slave.device_prep_dma_interrupt =
			pl08x_prep_dma_interrupt;
		pl08x->slave.device_tx_status = pl08x_dma_tx_status;
		pl08x->slave.device_issue_pending = pl08x_issue_pending;
		pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
		pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
		pl08x->slave.device_config = pl08x_config;
		pl08x->slave.device_pause = pl08x_pause;
		pl08x->slave.device_resume = pl08x_resume;
		pl08x->slave.device_terminate_all = pl08x_terminate_all;
		pl08x->slave.device_synchronize = pl08x_synchronize;
		pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
		pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
		pl08x->slave.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		pl08x->slave.residue_granularity =
			DMA_RESIDUE_GRANULARITY_SEGMENT;
	}

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	} else {
		pl08x->slave.filter.map = pl08x->pd->slave_map;
		pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
		pl08x->slave.filter.fn = pl08x_filter_fn;
	}

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts */
	if (vd->ftdmac020)
		/* This variant has error IRQs in bits 16-19 */
		writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR);
	else
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	/* Attach the interrupt handler */
	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		if (vd->ftdmac020) {
			/* FTDMA020 has a special channel busy register */
			ch->reg_busy = ch->base + FTDMAC020_CH_BUSY;
			ch->reg_config = ch->base + FTDMAC020_CH_CFG;
			ch->reg_control = ch->base + FTDMAC020_CH_CSR;
			ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR;
			ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR;
			ch->reg_lli = ch->base + FTDMAC020_CH_LLP;
			ch->ftdmac020 = true;
		} else {
			ch->reg_config = ch->base + vd->config_offset;
			ch->reg_control = ch->base + PL080_CH_CONTROL;
			ch->reg_src = ch->base + PL080_CH_SRC_ADDR;
			ch->reg_dst = ch->base + PL080_CH_DST_ADDR;
			ch->reg_lli = ch->base + PL080_CH_LLI;
		}
		if (vd->pl080s)
			ch->pl080s = true;

		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}

	/* Register slave channels */
	if (pl08x->has_slave) {
		ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					pl08x->pd->num_slave_channels, true);
		if (ret < 0) {
			dev_warn(&pl08x->adev->dev,
				 "%s failed to enumerate slave channels - %d\n",
				 __func__, ret);
			goto out_no_slave;
		}
	}

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	if (pl08x->has_slave) {
		ret = dma_async_device_register(&pl08x->slave);
		if (ret) {
			dev_warn(&pl08x->adev->dev,
				 "%s failed to register slave as an async device - %d\n",
				 __func__, ret);
			goto out_no_slave_reg;
		}
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	if (pl08x->has_slave)
		pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	iounmap(pl08x->base);
out_no_ioremap:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 16,
	.dualmaster = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_nomadik = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.dualmaster = true,
	.nomadik = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl080s = {
	.config_offset = PL080S_CH_CONFIG,
	.channels = 8,
	.signals = 32,
	.pl080s = true,
	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_pl081 = {
	.config_offset = PL080_CH_CONFIG,
	.channels = 2,
	.signals = 16,
	.dualmaster = false,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};

static struct vendor_data vendor_ftdmac020 = {
	.config_offset = PL080_CH_CONFIG,
	.ftdmac020 = true,
	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static const struct amba_id pl08x_ids[] = {
	/* Samsung PL080S variant */
	{
		.id	= 0x0a141080,
		.mask	= 0xffffffff,
		.data	= &vendor_pl080s,
	},
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	/* Faraday Technology FTDMAC020 */
	{
		.id	= 0x0003b080,
		.mask	= 0x000fffff,
		.data	= &vendor_ftdmac020,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};
static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);