/*
 * Header file for the Atmel AHB DMA Controller driver
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#ifndef AT_HDMAC_REGS_H
#define AT_HDMAC_REGS_H

#include <linux/platform_data/dma-atmel.h>

#define AT_DMA_MAX_NR_CHANNELS 8

#define AT_DMA_GCFG 0x00 /* Global Configuration Register */
#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */
#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. */
#define AT_DMA_ARB_CFG_FIXED (0x0 << 4)
#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4)

#define AT_DMA_EN 0x04 /* Controller Enable Register */
#define AT_DMA_ENABLE (0x1 << 0)

#define AT_DMA_SREQ 0x08 /* Software Single Request Register */
#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */
#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */

#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */
#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */
#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */

#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */
#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */
#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */

#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */
#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */

/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
#define AT_DMA_EBCIER 0x18 /* Enable register */
#define AT_DMA_EBCIDR 0x1C /* Disable register */
#define AT_DMA_EBCIMR 0x20 /* Mask Register */
#define AT_DMA_EBCISR 0x24 /* Status Register */
#define AT_DMA_CBTC_OFFSET 8
#define AT_DMA_ERR_OFFSET 16
#define AT_DMA_BTC(x) (0x1 << (x))
#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x)))
#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x)))

#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */
#define AT_DMA_ENA(x) (0x1 << (x))
#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x)))
#define AT_DMA_KEEP(x) (0x1 << (24 + (x)))

#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */
#define AT_DMA_DIS(x) (0x1 << (x))
#define AT_DMA_RES(x) (0x1 << ( 8 + (x)))

#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */
#define AT_DMA_EMPT(x) (0x1 << (16 + (x)))
#define AT_DMA_STAL(x) (0x1 << (24 + (x)))

#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */
#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */

/* Hardware register offset for each channel */
#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */
#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */
#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */
#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */
#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */
#define ATC_CFG_OFFSET 0x14 /* Configuration Register */
#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */
#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */

/* Bitfield definitions */

/* Bitfields in DSCR */
#define ATC_DSCR_IF(i) (0x3 & (i)) /* Descriptor fetched via AHB-Lite Interface i */

/* Bitfields in CTRLA */
#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16))
#define ATC_SCSIZE_1 (0x0 << 16)
#define ATC_SCSIZE_4 (0x1 << 16)
#define ATC_SCSIZE_8 (0x2 << 16)
#define ATC_SCSIZE_16 (0x3 << 16)
#define ATC_SCSIZE_32 (0x4 << 16)
#define ATC_SCSIZE_64 (0x5 << 16)
#define ATC_SCSIZE_128 (0x6 << 16)
#define ATC_SCSIZE_256 (0x7 << 16)
#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20))
#define ATC_DCSIZE_1 (0x0 << 20)
#define ATC_DCSIZE_4 (0x1 << 20)
#define ATC_DCSIZE_8 (0x2 << 20)
#define ATC_DCSIZE_16 (0x3 << 20)
#define ATC_DCSIZE_32 (0x4 << 20)
#define ATC_DCSIZE_64 (0x5 << 20)
#define ATC_DCSIZE_128 (0x6 << 20)
#define ATC_DCSIZE_256 (0x7 << 20)
#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
#define ATC_SRC_WIDTH(x) ((x) << 24)
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
#define ATC_SRC_WIDTH_WORD (0x2 << 24)
#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
#define ATC_DST_WIDTH(x) ((x) << 28)
#define ATC_DST_WIDTH_BYTE (0x0 << 28)
#define ATC_DST_WIDTH_HALFWORD (0x1 << 28)
#define ATC_DST_WIDTH_WORD (0x2 << 28)
#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */

/* Bitfields in CTRLB */
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
/* Specify AHB interfaces */
#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */

#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */
#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */
#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */
#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */
#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */
#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */
#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */
#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */
#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */
#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */
#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24)
#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */
#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */
#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */
#define ATC_DST_ADDR_MODE_MASK (0x3 << 28)
#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */
#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */
#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */
#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */
#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */
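
/*
 * Illustrative sketch (not part of the original header): one way the CTRLB
 * bitfields above can be combined for a memory-to-peripheral transfer with
 * the DMA controller acting as flow controller. The helper name and the
 * choice of AHB interfaces are assumptions made for this example only.
 */
static inline u32 atc_example_mem2per_ctrlb(void)
{
	return ATC_SIF(AT_DMA_MEM_IF)     /* fetch source data over the memory interface */
	     | ATC_DIF(AT_DMA_PER_IF)     /* push destination data over the peripheral interface */
	     | ATC_FC_MEM2PER             /* DMA controller is the flow controller */
	     | ATC_SRC_ADDR_MODE_INCR     /* walk through the memory buffer */
	     | ATC_DST_ADDR_MODE_FIXED;   /* peripheral data register does not move */
}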

/* Bitfields in CFG */
/* are in at_hdmac.h */

/* Bitfields in SPIP */
#define ATC_SPIP_HOLE(x) (0xFFFFU & (x))
#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)

/* Bitfields in DPIP */
#define ATC_DPIP_HOLE(x) (0xFFFFU & (x))
#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16)

/*--  descriptors  -----------------------------------------------------*/

/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
	/* values that are not changed by hardware */
	dma_addr_t saddr;
	dma_addr_t daddr;
	/* value that may get written back: */
	u32 ctrla;
	/* more values that are not changed by hardware */
	u32 ctrlb;
	dma_addr_t dscr; /* chain to next lli */
};

/**
 * struct at_desc - software descriptor
 * @lli: hardware lli structure
 * @tx_list: list of child descriptors chained to this one for one transfer
 * @txd: support for the async_tx api
 * @desc_node: node on the channel descriptors list
 * @len: descriptor byte count
 * @tx_width: transfer width
 * @total_len: total transaction byte count
 */
struct at_desc {
	/* FIRST values the hardware uses */
	struct at_lli lli;

	/* THEN values for driver housekeeping */
	struct list_head tx_list;
	struct dma_async_tx_descriptor txd;
	struct list_head desc_node;
	size_t len;
	u32 tx_width;
	size_t total_len;
};

static inline struct at_desc *
txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_desc, txd);
}


/*--  Channels  --------------------------------------------------------*/

/**
 * enum atc_status - information bits stored in channel status flag
 *
 * Manipulated with atomic operations.
 */
enum atc_status {
	ATC_IS_ERROR = 0,
	ATC_IS_PAUSED = 1,
	ATC_IS_CYCLIC = 24,
};

/**
 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
 * @chan_common: common dmaengine channel object members
 * @device: parent device
 * @ch_regs: memory-mapped register base
 * @mask: channel index in a mask
 * @per_if: peripheral interface
 * @mem_if: memory interface
 * @status: transmit status information from irq/prep* functions
 *	to tasklet (use atomic operations)
 * @tasklet: bottom half to finish transaction work
 * @save_cfg: configuration register that is saved on suspend/resume cycle
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *	the cyclic list on suspend/resume cycle
 * @dma_sconfig: configuration for slave transfers, passed via
 *	.device_config
 * @lock: serializes enqueue/dequeue operations to descriptors lists
 * @active_list: list of descriptors the dmaengine is running on
 * @queue: list of descriptors ready to be submitted to engine
 * @free_list: list of descriptors usable by the channel
 * @descs_allocated: records the actual size of the descriptor pool
 */
struct at_dma_chan {
	struct dma_chan chan_common;
	struct at_dma *device;
	void __iomem *ch_regs;
	u8 mask;
	u8 per_if;
	u8 mem_if;
	unsigned long status;
	struct tasklet_struct tasklet;
	u32 save_cfg;
	u32 save_dscr;
	struct dma_slave_config dma_sconfig;

	spinlock_t lock;

	/* these other elements are all protected by lock */
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int descs_allocated;
};

#define channel_readl(atchan, name) \
	__raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)

#define channel_writel(atchan, name, val) \
	__raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
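
/*
 * Illustrative sketch (not part of the original header): how the accessors
 * above and the ATC_*_OFFSET names are meant to be combined when handing a
 * descriptor chain to an idle channel. The helper name is an assumption for
 * this example only; enabling the channel afterwards goes through the global
 * AT_DMA_CHER register.
 */
static inline void atc_example_load_first_desc(struct at_dma_chan *atchan,
					       struct at_desc *first)
{
	/* clear the per-channel transfer registers */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	/* point the channel at the first hardware LLI of the chain */
	channel_writel(atchan, DSCR, first->txd.phys);
}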

static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_dma_chan, chan_common);
}

/*
 * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
 *
 * This can be done by finding the most significant bit set.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

/*
 * Fix sconfig's bus width according to at_hdmac.
 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
 */
static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return 1;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return 2;
	default:
		/* For 1 byte width or fallback */
		return 0;
	}
}
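
/*
 * Illustrative sketch (not part of the original header): how the two helpers
 * above are typically combined to turn a dma_slave_config into the CTRLA
 * chunk-size and transfer-width encodings. The helper name and the use of
 * only the destination fields are assumptions made for this example.
 */
static inline u32 atc_example_slave_ctrla(struct dma_slave_config *sconfig)
{
	u32 maxburst = sconfig->dst_maxburst;

	/* 1/4/8/.../256 beats -> 0..7, the encoding ATC_DCSIZE() expects */
	convert_burst(&maxburst);

	/* 1/2/4 bytes -> 0..2, the encoding ATC_DST_WIDTH() expects */
	return ATC_DCSIZE(maxburst)
	     | ATC_DST_WIDTH(convert_buswidth(sconfig->dst_addr_width));
}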

/*--  Controller  ------------------------------------------------------*/

/**
 * struct at_dma - internal representation of an Atmel HDMA Controller
 * @dma_common: common dmaengine dma_device object members
 * @regs: memory-mapped register base
 * @clk: dma controller clock
 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels available in a mask
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @chan: channels table to store at_dma_chan structures
 */
struct at_dma {
	struct dma_device dma_common;
	void __iomem *regs;
	struct clk *clk;
	u32 save_imr;

	u8 all_chan_mask;

	struct dma_pool *dma_desc_pool;
	/* AT THE END channels table */
	struct at_dma_chan chan[0];
};

#define dma_readl(atdma, name) \
	__raw_readl((atdma)->regs + AT_DMA_##name)
#define dma_writel(atdma, name, val) \
	__raw_writel((val), (atdma)->regs + AT_DMA_##name)

static inline struct at_dma *to_at_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct at_dma, dma_common);
}


/*--  Helper functions  ------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	dev_err(chan2dev(&atchan->chan_common),
		"  channel %d : imr = 0x%x, chsr = 0x%x\n",
		atchan->chan_common.chan_id,
		dma_readl(atdma, EBCIMR),
		dma_readl(atdma, CHSR));

	dev_err(chan2dev(&atchan->chan_common),
		"  channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
		channel_readl(atchan, SADDR),
		channel_readl(atchan, DADDR),
		channel_readl(atchan, CTRLA),
		channel_readl(atchan, CTRLB),
		channel_readl(atchan, CFG),
		channel_readl(atchan, DSCR));
}
#else
static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
#endif

static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
	dev_crit(chan2dev(&atchan->chan_common),
		 "  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
		 lli->saddr, lli->daddr,
		 lli->ctrla, lli->ctrlb, lli->dscr);
}

static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
{
	u32 ebci;

	/* enable interrupts on buffer transfer completion & error */
	ebci = AT_DMA_BTC(chan_id)
	     | AT_DMA_ERR(chan_id);
	if (on)
		dma_writel(atdma, EBCIER, ebci);
	else
		dma_writel(atdma, EBCIDR, ebci);
}

static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 1);
}

static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 0);
}


/**
 * atc_chan_is_enabled - test if given channel is enabled
 * @atchan: channel whose status we want to test
 */
static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	return !!(dma_readl(atdma, CHSR) & atchan->mask);
}

/**
 * atc_chan_is_paused - test channel pause/resume status
 * @atchan: channel whose status we want to test
 */
static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_PAUSED, &atchan->status);
}

/**
 * atc_chan_is_cyclic - test if given channel has cyclic property set
 * @atchan: channel whose status we want to test
 */
static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_CYCLIC, &atchan->status);
}

/**
 * set_desc_eol - set end-of-link to descriptor so it will end transfer
 * @desc: descriptor, single or at the end of a chain, to end chain on
 */
static void set_desc_eol(struct at_desc *desc)
{
	u32 ctrlb = desc->lli.ctrlb;

	ctrlb &= ~ATC_IEN;
	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;

	desc->lli.ctrlb = ctrlb;
	desc->lli.dscr = 0;
}

#endif /* AT_HDMAC_REGS_H */