drivers/dma/intel_mid_dma_regs.h
/*
 * intel_mid_dma_regs.h - Intel MID DMA Drivers
 *
 * Copyright (C) 2008-10 Intel Corp
 * Author: Vinod Koul <vinod.koul@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#ifndef __INTEL_MID_DMAC_REGS_H__
#define __INTEL_MID_DMAC_REGS_H__

#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/pci_ids.h>

#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"

#define REG_BIT0		0x00000001
#define REG_BIT8		0x00000100
#define INT_MASK_WE		0x8
#define CLEAR_DONE		0xFFFFEFFF
#define UNMASK_INTR_REG(chan_num) \
	((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)

#define ENABLE_CHANNEL(chan_num) \
	((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))

#define DISABLE_CHANNEL(chan_num) \
	(REG_BIT8 << chan_num)

#define DESCS_PER_CHANNEL	16
/* DMA Registers */
/* registers associated with channel programming */
#define DMA_REG_SIZE		0x400
#define DMA_CH_SIZE		0x58

/* CH X REG = (DMA_CH_SIZE)*CH_NO + REG */
#define SAR			0x00 /* Source Address Register */
#define DAR			0x08 /* Destination Address Register */
#define LLP			0x10 /* Linked List Pointer Register */
#define CTL_LOW			0x18 /* Control Register */
#define CTL_HIGH		0x1C /* Control Register */
#define CFG_LOW			0x40 /* Configuration Register Low */
#define CFG_HIGH		0x44 /* Configuration Register High */
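
/*
 * Illustrative sketch (not part of the original header): per the
 * CH X REG = (DMA_CH_SIZE)*CH_NO + REG relation above, a caller could
 * locate channel ch_no's register block and read its source address as:
 *
 *	void __iomem *ch_regs = dma_base + DMA_CH_SIZE * ch_no;
 *	u32 src = ioread32(ch_regs + SAR);
 */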

#define STATUS_TFR		0x2E8
#define STATUS_BLOCK		0x2F0
#define STATUS_ERR		0x308

#define RAW_TFR			0x2C0
#define RAW_BLOCK		0x2C8
#define RAW_ERR			0x2E0

#define MASK_TFR		0x310
#define MASK_BLOCK		0x318
#define MASK_SRC_TRAN		0x320
#define MASK_DST_TRAN		0x328
#define MASK_ERR		0x330
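
/*
 * Illustrative sketch: the per-interrupt MASK_* registers use the same
 * "bit plus write-enable" layout as UNMASK_INTR_REG()/MASK_INTR_REG()
 * above, so unmasking the transfer-complete interrupt of channel ch_no
 * could look like:
 *
 *	iowrite32(UNMASK_INTR_REG(ch_no), dma_base + MASK_TFR);
 */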

#define CLEAR_TFR		0x338
#define CLEAR_BLOCK		0x340
#define CLEAR_SRC_TRAN		0x348
#define CLEAR_DST_TRAN		0x350
#define CLEAR_ERR		0x358

#define INTR_STATUS		0x360
#define DMA_CFG			0x398
#define DMA_CHAN_EN		0x3A0
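
/*
 * Illustrative sketch: ENABLE_CHANNEL()/DISABLE_CHANNEL() values are
 * written to DMA_CHAN_EN; the upper byte (REG_BIT8 << chan_num) acts
 * as a write-enable for the matching enable bit, which is why
 * DISABLE_CHANNEL() sets only the write-enable bit:
 *
 *	iowrite32(ENABLE_CHANNEL(ch_no), dma_base + DMA_CHAN_EN);
 *	...
 *	iowrite32(DISABLE_CHANNEL(ch_no), dma_base + DMA_CHAN_EN);
 */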

/* DMA channel control registers */
union intel_mid_dma_ctl_lo {
	struct {
		u32	int_en:1;	/* enable or disable interrupts */
					/* should be 0 */
		u32	dst_tr_width:3;	/* destination transfer width */
					/* usually 32 bits = 010 */
		u32	src_tr_width:3;	/* source transfer width */
					/* usually 32 bits = 010 */
		u32	dinc:2;		/* destination address inc/dec */
					/* For mem: INC = 00, Peripheral NoINC = 11 */
		u32	sinc:2;		/* source address inc or dec, as above */
		u32	dst_msize:3;	/* destination burst transaction length */
					/* always = 16, i.e. 011 */
		u32	src_msize:3;	/* source burst transaction length */
					/* always = 16, i.e. 011 */
		u32	reser1:3;
		u32	tt_fc:3;	/* transfer type and flow controller */
					/* M-M = 000
					   P-M = 010
					   M-P = 001 */
		u32	dms:2;		/* destination master select = 0 */
		u32	sms:2;		/* source master select = 0 */
		u32	llp_dst_en:1;	/* enable/disable destination LLP = 0 */
		u32	llp_src_en:1;	/* enable/disable source LLP = 0 */
		u32	reser2:3;
	} ctlx;
	u32	ctl_lo;
};
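
/*
 * Illustrative sketch (assumed values, not the driver's exact setup):
 * a memory-to-peripheral control word could be built through the
 * bitfield view and then written out as the flat u32:
 *
 *	union intel_mid_dma_ctl_lo ctl_lo;
 *
 *	ctl_lo.ctl_lo = 0;
 *	ctl_lo.ctlx.dst_tr_width = 2;	destination width 32 bits (010)
 *	ctl_lo.ctlx.src_tr_width = 2;	source width 32 bits (010)
 *	ctl_lo.ctlx.dst_msize = 3;	burst length 16 (011)
 *	ctl_lo.ctlx.src_msize = 3;	burst length 16 (011)
 *	ctl_lo.ctlx.tt_fc = 1;		M-P transfer
 *	iowrite32(ctl_lo.ctl_lo, ch_regs + CTL_LOW);
 */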

union intel_mid_dma_ctl_hi {
	struct {
		u32	block_ts:12;	/* block transfer size */
		u32	done:1;		/* Done - updated by DMAC */
		u32	reser:19;	/* configured by DMAC */
	} ctlx;
	u32	ctl_hi;
};

/* DMA channel configuration registers */
union intel_mid_dma_cfg_lo {
	struct {
		u32	reser1:5;
		u32	ch_prior:3;	/* channel priority = 0 */
		u32	ch_susp:1;	/* channel suspend = 0 */
		u32	fifo_empty:1;	/* FIFO empty or not R bit = 0 */
		u32	hs_sel_dst:1;	/* select HW/SW destination handshaking */
					/* HW = 0, SW = 1 */
		u32	hs_sel_src:1;	/* select HW/SW src handshaking */
		u32	reser2:6;
		u32	dst_hs_pol:1;	/* dest HS interface polarity */
		u32	src_hs_pol:1;	/* src HS interface polarity */
		u32	max_abrst:10;	/* max AMBA burst len = 0 (no sw limit) */
		u32	reload_src:1;	/* auto reload src addr = 1 if src is P */
		u32	reload_dst:1;	/* auto reload dest addr = 1 if dest is P */
	} cfgx;
	u32	cfg_lo;
};

union intel_mid_dma_cfg_hi {
	struct {
		u32	fcmode:1;	/* flow control mode = 1 */
		u32	fifo_mode:1;	/* FIFO mode select = 1 */
		u32	protctl:3;	/* protection control = 0 */
		u32	rsvd:2;
		u32	src_per:4;	/* src hw HS interface */
		u32	dst_per:4;	/* dest hw HS interface */
		u32	reser2:17;
	} cfgx;
	u32	cfg_hi;
};

/**
 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
 * @chan: dma_chan structure representation for mid chan
 * @ch_regs: MMIO register space pointer to channel register
 * @dma_base: MMIO register space DMA engine base pointer
 * @ch_id: DMA channel id
 * @lock: channel spinlock
 * @completed: DMA cookie
 * @active_list: current active descriptors
 * @queue: current queued up descriptors
 * @free_list: current free descriptors
 * @mid_slave: dma slave structure
 * @descs_allocated: total number of descriptors allocated
 * @dma: dma device structure pointer
 * @busy: bool representing if ch is busy (active txn) or not
 * @in_use: bool representing if ch is in use or not
 * @raw_tfr: raw tfr interrupt received
 * @raw_block: raw block interrupt received
 */
struct intel_mid_dma_chan {
	struct dma_chan		chan;
	void __iomem		*ch_regs;
	void __iomem		*dma_base;
	int			ch_id;
	spinlock_t		lock;
	dma_cookie_t		completed;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
	struct middma_device	*dma;
	bool			busy;
	bool			in_use;
	u32			raw_tfr;
	u32			raw_block;
	struct intel_mid_dma_slave *mid_slave;
};

static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
						struct dma_chan *chan)
{
	return container_of(chan, struct intel_mid_dma_chan, chan);
}

enum intel_mid_dma_state {
	RUNNING = 0,
	SUSPENDED,
};
/**
 * struct middma_device - internal representation of a DMA device
 * @pdev: PCI device
 * @dma_base: MMIO register space pointer of DMA
 * @dma_pool: for allocating DMA descriptors
 * @common: embedded struct dma_device
 * @tasklet: dma tasklet for processing interrupts
 * @ch: per channel data
 * @pci_id: DMA device PCI ID
 * @intr_mask: Interrupt mask to be used
 * @mask_reg: MMIO register for peripheral mask
 * @chan_base: Base ch index (read from driver data)
 * @max_chan: max number of chs supported (from drv_data)
 * @block_size: Block size of DMA transfer supported (from drv_data)
 * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
 * @state: dma PM device state
 */
struct middma_device {
	struct pci_dev		*pdev;
	void __iomem		*dma_base;
	struct pci_pool		*dma_pool;
	struct dma_device	common;
	struct tasklet_struct	tasklet;
	struct intel_mid_dma_chan ch[MAX_CHAN];
	unsigned int		pci_id;
	unsigned int		intr_mask;
	void __iomem		*mask_reg;
	int			chan_base;
	int			max_chan;
	int			block_size;
	unsigned int		pimr_mask;
	enum intel_mid_dma_state state;
};

static inline struct middma_device *to_middma_device(struct dma_device *common)
{
	return container_of(common, struct middma_device, common);
}

struct intel_mid_dma_desc {
	void __iomem			*block; /* ch ptr */
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	size_t				len;
	dma_addr_t			sar;
	dma_addr_t			dar;
	u32				cfg_hi;
	u32				cfg_lo;
	u32				ctl_lo;
	u32				ctl_hi;
	struct pci_pool			*lli_pool;
	struct intel_mid_dma_lli	*lli;
	dma_addr_t			lli_phys;
	unsigned int			lli_length;
	unsigned int			current_lli;
	dma_addr_t			next;
	enum dma_transfer_direction	dirn;
	enum dma_status			status;
	enum dma_slave_buswidth		width; /* width of DMA txn */
	enum intel_mid_dma_mode		cfg_mode; /* mode configuration */
};

struct intel_mid_dma_lli {
	dma_addr_t			sar;
	dma_addr_t			dar;
	dma_addr_t			llp;
	u32				ctl_lo;
	u32				ctl_hi;
} __attribute__ ((packed));
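
/*
 * Illustrative sketch: hardware linked-list items are chained by
 * pointing each llp at the bus address of the next item; the chain is
 * typically terminated with llp = 0 and the LLP enable bits cleared in
 * that item's ctl_lo.  Assuming lli[] was allocated from lli_pool at
 * bus address lli_phys:
 *
 *	lli[0].llp = lli_phys + sizeof(struct intel_mid_dma_lli);
 *	lli[1].llp = 0;
 */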

static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
	u32 en_reg = ioread32(dma + DMA_CHAN_EN);
	return (en_reg >> ch_no) & 0x1;
}
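
/*
 * Usage sketch (illustrative): after requesting a channel shutdown, a
 * caller could poll test_ch_en() until the controller reports the
 * channel as disabled:
 *
 *	iowrite32(DISABLE_CHANNEL(ch_no), dma_base + DMA_CHAN_EN);
 *	while (test_ch_en(dma_base, ch_no))
 *		cpu_relax();
 */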

static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
		(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct intel_mid_dma_desc, txd);
}

static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
		(struct dma_slave_config *slave)
{
	return container_of(slave, struct intel_mid_dma_slave, dma_slave);
}

int dma_resume(struct device *dev);

#endif /* __INTEL_MID_DMAC_REGS_H__ */