/* SPDX-License-Identifier: GPL-2.0
 *
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */
#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

/**
 * shdma_pm_state - DMA channel PM state
 * SHDMA_PM_ESTABLISHED:	either idle or during data transfer
 * SHDMA_PM_BUSY:		during the transfer preparation, when we have to
 *				drop the lock temporarily
 * SHDMA_PM_PENDING:		transfers pending
 */
enum shdma_pm_state {
	SHDMA_PM_ESTABLISHED,
	SHDMA_PM_BUSY,
	SHDMA_PM_PENDING,
};

/*
 * Drivers using this library are expected to embed struct shdma_dev,
 * struct shdma_chan, struct shdma_desc, and struct shdma_slave
 * in their respective device, channel, descriptor and slave objects,
 * as sketched below.
 */

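/*
 * Illustration only: a minimal sketch of the embedding described above. The
 * my_dmae_* types are hypothetical driver objects, not part of this library.
 *
 *	struct my_dmae_chan {
 *		struct shdma_chan shdma_chan;	// library channel object
 *		void __iomem *base;		// driver-private channel registers
 *	};
 *
 *	struct my_dmae_desc {
 *		struct my_dmae_hw_desc hw;	// hardware descriptor layout
 *		struct shdma_desc shdma_desc;	// library bookkeeping
 *	};
 *
 *	struct my_dmae_device {
 *		struct shdma_dev shdma_dev;	// passed to shdma_init()
 *		void __iomem *chan_reg;		// driver-private register base
 *	};
 */
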
struct shdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	bool cyclic;			/* used as cyclic transfer */
};

struct shdma_chan {
	spinlock_t chan_lock;		/* Channel operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct list_head ld_free;	/* Free link descriptors */
	struct dma_chan dma_chan;	/* DMA channel */
	struct device *dev;		/* Channel device */
	void *desc;			/* buffer for descriptor array */
	int desc_num;			/* desc count */
	size_t max_xfer_len;		/* max transfer length */
	int id;				/* Raw id of this channel */
	int irq;			/* Channel IRQ */
	int slave_id;			/* Client ID for slave DMA */
	int real_slave_id;		/* argument passed to filter function */
	int hw_req;			/* DMA request line for slave DMA - same
					 * as MID/RID, used with DT */
	enum shdma_pm_state pm_state;
};

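/*
 * Illustration only: because struct shdma_chan embeds struct dma_chan, a
 * driver typically recovers its own channel object with container_of(). The
 * to_my_dmae_chan() helper is hypothetical, not part of this library.
 *
 *	static struct my_dmae_chan *to_my_dmae_chan(struct dma_chan *chan)
 *	{
 *		struct shdma_chan *schan =
 *			container_of(chan, struct shdma_chan, dma_chan);
 *
 *		return container_of(schan, struct my_dmae_chan, shdma_chan);
 *	}
 */
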
/**
 * struct shdma_ops - simple DMA driver operations
 * desc_completed:	return true if this is the descriptor that has just
 *			completed (atomic)
 * halt_channel:	stop DMA channel operation (atomic)
 * channel_busy:	return true if the channel is busy (atomic)
 * slave_addr:		return slave DMA address
 * desc_setup:		set up the hardware-specific descriptor portion (atomic)
 * set_slave:		bind channel to a slave
 * setup_xfer:		configure channel hardware for operation (atomic)
 * start_xfer:		start the DMA transfer (atomic)
 * embedded_desc:	return the Nth struct shdma_desc pointer from the
 *			descriptor array
 * chan_irq:		process channel IRQ, return true if a transfer has
 *			completed (atomic)
 * get_partial:		return the number of bytes of a partially completed
 *			descriptor that have not been transferred yet
 */
struct shdma_ops {
	bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
	void (*halt_channel)(struct shdma_chan *);
	bool (*channel_busy)(struct shdma_chan *);
	dma_addr_t (*slave_addr)(struct shdma_chan *);
	int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
			  dma_addr_t, dma_addr_t, size_t *);
	int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
	void (*setup_xfer)(struct shdma_chan *, int);
	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
	struct shdma_desc *(*embedded_desc)(void *, int);
	bool (*chan_irq)(struct shdma_chan *, int);
	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
};

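/*
 * Illustration only: a driver provides one such ops table and points its
 * shdma_dev::ops at it before calling shdma_init(). The my_dmae_* callbacks
 * are hypothetical stand-ins for real hardware accessors.
 *
 *	static const struct shdma_ops my_dmae_shdma_ops = {
 *		.desc_completed	= my_dmae_desc_completed,
 *		.halt_channel	= my_dmae_halt,
 *		.channel_busy	= my_dmae_channel_busy,
 *		.slave_addr	= my_dmae_slave_addr,
 *		.desc_setup	= my_dmae_desc_setup,
 *		.set_slave	= my_dmae_set_slave,
 *		.setup_xfer	= my_dmae_setup_xfer,
 *		.start_xfer	= my_dmae_start_xfer,
 *		.embedded_desc	= my_dmae_embedded_desc,
 *		.chan_irq	= my_dmae_chan_irq,
 *		.get_partial	= my_dmae_get_partial,
 *	};
 */
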
struct shdma_dev {
	struct dma_device dma_dev;
	struct shdma_chan **schan;
	const struct shdma_ops *ops;
};

#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
				i < (d)->dma_dev.chancnt; c = (d)->schan[++i])

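/*
 * Illustration only: iterating all channels of the hypothetical my_dmae
 * device from the sketches above.
 *
 *	struct shdma_chan *schan;
 *	int i;
 *
 *	shdma_for_each_chan(schan, &my_dev->shdma_dev, i)
 *		if (schan)
 *			my_dmae_halt(schan);
 *
 * The macro walks dma_dev.chancnt entries of the schan[] array, so it is
 * only meaningful once the channels have been probed.
 */
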
int shdma_request_irq(struct shdma_chan *, int,
		      unsigned long, const char *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id);
void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
	       int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);

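/*
 * Illustration only: a typical call order for the hypothetical my_dmae
 * platform driver sketched above (locals and error handling omitted):
 *
 *	my_dev->shdma_dev.ops = &my_dmae_shdma_ops;
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, chan_num);
 *	for (i = 0; i < chan_num; i++)
 *		shdma_chan_probe(&my_dev->shdma_dev,
 *				 &my_chan[i]->shdma_chan, i);
 *	err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 *
 * On removal the driver unregisters with dma_async_device_unregister(),
 * calls shdma_chan_remove() for each channel and finally shdma_cleanup().
 */
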
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	return false;
}
#endif

#endif /* SHDMA_BASE_H */