be2net: Bug fix to avoid disabling bottom half during firmware upgrade.
[linux/fpc-iii.git] / drivers / dma / coh901318_lli.c
blob 9f7e0e6a7eea12e8487cef8fd1395c38cfbdc7a0
1 /*
2 * drivers/dma/coh901318_lli.c
4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 * Support functions for handling lli for dma
7 * Author: Per Friden <per.friden@stericsson.com>
8 */
10 #include <linux/dma-mapping.h>
11 #include <linux/spinlock.h>
12 #include <linux/dmapool.h>
13 #include <linux/memory.h>
14 #include <linux/gfp.h>
15 #include <mach/coh901318.h>
17 #include "coh901318_lli.h"
/*
 * With debugfs support for the U300 enabled, keep a per-pool counter of
 * outstanding lli allocations (incremented on alloc, decremented on free)
 * so descriptor leaks can be inspected; otherwise the bookkeeping
 * compiles away to nothing.
 */
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
/* No-op stubs: call sites reduce to empty statements. */
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif
27 static struct coh901318_lli *
28 coh901318_lli_next(struct coh901318_lli *data)
30 if (data == NULL || data->link_addr == 0)
31 return NULL;
33 return (struct coh901318_lli *) data->virt_link_addr;
36 int coh901318_pool_create(struct coh901318_pool *pool,
37 struct device *dev,
38 size_t size, size_t align)
40 spin_lock_init(&pool->lock);
41 pool->dev = dev;
42 pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
44 DEBUGFS_POOL_COUNTER_RESET(pool);
45 return 0;
48 int coh901318_pool_destroy(struct coh901318_pool *pool)
51 dma_pool_destroy(pool->dmapool);
52 return 0;
55 struct coh901318_lli *
56 coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
58 int i;
59 struct coh901318_lli *head;
60 struct coh901318_lli *lli;
61 struct coh901318_lli *lli_prev;
62 dma_addr_t phy;
64 if (len == 0)
65 goto err;
67 spin_lock(&pool->lock);
69 head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
71 if (head == NULL)
72 goto err;
74 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
76 lli = head;
77 lli->phy_this = phy;
78 lli->link_addr = 0x00000000;
79 lli->virt_link_addr = 0x00000000U;
81 for (i = 1; i < len; i++) {
82 lli_prev = lli;
84 lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
86 if (lli == NULL)
87 goto err_clean_up;
89 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
90 lli->phy_this = phy;
91 lli->link_addr = 0x00000000;
92 lli->virt_link_addr = 0x00000000U;
94 lli_prev->link_addr = phy;
95 lli_prev->virt_link_addr = lli;
98 spin_unlock(&pool->lock);
100 return head;
102 err:
103 spin_unlock(&pool->lock);
104 return NULL;
106 err_clean_up:
107 lli_prev->link_addr = 0x00000000U;
108 spin_unlock(&pool->lock);
109 coh901318_lli_free(pool, &head);
110 return NULL;
113 void coh901318_lli_free(struct coh901318_pool *pool,
114 struct coh901318_lli **lli)
116 struct coh901318_lli *l;
117 struct coh901318_lli *next;
119 if (lli == NULL)
120 return;
122 l = *lli;
124 if (l == NULL)
125 return;
127 spin_lock(&pool->lock);
129 while (l->link_addr) {
130 next = l->virt_link_addr;
131 dma_pool_free(pool->dmapool, l, l->phy_this);
132 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
133 l = next;
135 dma_pool_free(pool->dmapool, l, l->phy_this);
136 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
138 spin_unlock(&pool->lock);
139 *lli = NULL;
143 coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
144 struct coh901318_lli *lli,
145 dma_addr_t source, unsigned int size,
146 dma_addr_t destination, u32 ctrl_chained,
147 u32 ctrl_eom)
149 int s = size;
150 dma_addr_t src = source;
151 dma_addr_t dst = destination;
153 lli->src_addr = src;
154 lli->dst_addr = dst;
156 while (lli->link_addr) {
157 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
158 lli->src_addr = src;
159 lli->dst_addr = dst;
161 s -= MAX_DMA_PACKET_SIZE;
162 lli = coh901318_lli_next(lli);
164 src += MAX_DMA_PACKET_SIZE;
165 dst += MAX_DMA_PACKET_SIZE;
168 lli->control = ctrl_eom | s;
169 lli->src_addr = src;
170 lli->dst_addr = dst;
172 return 0;
176 coh901318_lli_fill_single(struct coh901318_pool *pool,
177 struct coh901318_lli *lli,
178 dma_addr_t buf, unsigned int size,
179 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
180 enum dma_data_direction dir)
182 int s = size;
183 dma_addr_t src;
184 dma_addr_t dst;
187 if (dir == DMA_TO_DEVICE) {
188 src = buf;
189 dst = dev_addr;
191 } else if (dir == DMA_FROM_DEVICE) {
193 src = dev_addr;
194 dst = buf;
195 } else {
196 return -EINVAL;
199 while (lli->link_addr) {
200 size_t block_size = MAX_DMA_PACKET_SIZE;
201 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
203 /* If we are on the next-to-final block and there will
204 * be less than half a DMA packet left for the last
205 * block, then we want to make this block a little
206 * smaller to balance the sizes. This is meant to
207 * avoid too small transfers if the buffer size is
208 * (MAX_DMA_PACKET_SIZE*N + 1) */
209 if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
210 block_size = MAX_DMA_PACKET_SIZE/2;
212 s -= block_size;
213 lli->src_addr = src;
214 lli->dst_addr = dst;
216 lli = coh901318_lli_next(lli);
218 if (dir == DMA_TO_DEVICE)
219 src += block_size;
220 else if (dir == DMA_FROM_DEVICE)
221 dst += block_size;
224 lli->control = ctrl_eom | s;
225 lli->src_addr = src;
226 lli->dst_addr = dst;
228 return 0;
232 coh901318_lli_fill_sg(struct coh901318_pool *pool,
233 struct coh901318_lli *lli,
234 struct scatterlist *sgl, unsigned int nents,
235 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
236 u32 ctrl_last,
237 enum dma_data_direction dir, u32 ctrl_irq_mask)
239 int i;
240 struct scatterlist *sg;
241 u32 ctrl_sg;
242 dma_addr_t src = 0;
243 dma_addr_t dst = 0;
244 u32 bytes_to_transfer;
245 u32 elem_size;
247 if (lli == NULL)
248 goto err;
250 spin_lock(&pool->lock);
252 if (dir == DMA_TO_DEVICE)
253 dst = dev_addr;
254 else if (dir == DMA_FROM_DEVICE)
255 src = dev_addr;
256 else
257 goto err;
259 for_each_sg(sgl, sg, nents, i) {
260 if (sg_is_chain(sg)) {
261 /* sg continues to the next sg-element don't
262 * send ctrl_finish until the last
263 * sg-element in the chain
265 ctrl_sg = ctrl_chained;
266 } else if (i == nents - 1)
267 ctrl_sg = ctrl_last;
268 else
269 ctrl_sg = ctrl ? ctrl : ctrl_last;
272 if (dir == DMA_TO_DEVICE)
273 /* increment source address */
274 src = sg_phys(sg);
275 else
276 /* increment destination address */
277 dst = sg_phys(sg);
279 bytes_to_transfer = sg_dma_len(sg);
281 while (bytes_to_transfer) {
282 u32 val;
284 if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
285 elem_size = MAX_DMA_PACKET_SIZE;
286 val = ctrl_chained;
287 } else {
288 elem_size = bytes_to_transfer;
289 val = ctrl_sg;
292 lli->control = val | elem_size;
293 lli->src_addr = src;
294 lli->dst_addr = dst;
296 if (dir == DMA_FROM_DEVICE)
297 dst += elem_size;
298 else
299 src += elem_size;
301 BUG_ON(lli->link_addr & 3);
303 bytes_to_transfer -= elem_size;
304 lli = coh901318_lli_next(lli);
308 spin_unlock(&pool->lock);
310 return 0;
311 err:
312 spin_unlock(&pool->lock);
313 return -EINVAL;