vmebridge/driver/vme_dma.c
/*
 * vme_dma.c - PCI-VME bridge DMA management
 *
 * Copyright (c) 2009 Sebastien Dugue
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file provides the PCI-VME bridge DMA management code.
 */

#include <linux/pagemap.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif

#include <asm/atomic.h>

#include "vmebus.h"
#include "vme_bridge.h"
#include "vme_dma.h"
struct dma_channel channels[TSI148_NUM_DMA_CHANNELS];

/*
 * @dma_semaphore manages the common queue to access all the DMA channels.
 * Once a process gets through the semaphore, it must acquire the dma_lock
 * mutex to atomically look for an available channel.
 * The @dma_disable flag can be set to disable any further DMA transfers.
 */
static struct compat_semaphore dma_semaphore;
static struct mutex dma_lock;
static atomic_t dma_disable;

/*
 * Used for synchronizing between a DMA transfer using a channel and
 * module exit.
 */
wait_queue_head_t channel_wait[TSI148_NUM_DMA_CHANNELS];
void handle_dma_interrupt(int channel_mask)
{
        if (channel_mask & 1)
                wake_up(&channels[0].wait);

        if (channel_mask & 2)
                wake_up(&channels[1].wait);

        account_dma_interrupt(channel_mask);
}
static int sgl_fill_user_pages(struct page **pages, unsigned long uaddr,
                               const unsigned int nr_pages, int rw)
{
        int ret;

        /* Get user pages for the DMA transfer */
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr, nr_pages, rw, 0,
                             pages, NULL);
        up_read(&current->mm->mmap_sem);

        return ret;
}
static int sgl_fill_kernel_pages(struct page **pages, unsigned long kaddr,
                                 const unsigned int nr_pages, int rw)
{
        int i;

        /* Note: this supports lowmem pages only */
        if (!virt_addr_valid(kaddr))
                return -EINVAL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = virt_to_page(kaddr + PAGE_SIZE * i);

        return nr_pages;
}
/**
 * sgl_map_user_pages() - Pin user pages and put them into a scatter gather list
 * @sgl: Scatter gather list to fill
 * @nr_pages: Number of pages
 * @uaddr: User buffer address
 * @length: Length of the user buffer
 * @rw: Direction (0 = read from userspace / 1 = write to userspace)
 * @to_user: 1 - transfer is to/from a user-space buffer. 0 - kernel buffer.
 *
 * This function pins the pages of the userspace buffer and fills in the
 * scatter gather list.
 */
static int sgl_map_user_pages(struct scatterlist *sgl,
                              const unsigned int nr_pages, unsigned long uaddr,
                              size_t length, int rw, int to_user)
{
        int rc;
        int i;
        struct page **pages;

        if ((pages = kmalloc(nr_pages * sizeof(struct page *),
                             GFP_KERNEL)) == NULL)
                return -ENOMEM;

        if (to_user) {
                rc = sgl_fill_user_pages(pages, uaddr, nr_pages, rw);
                if (rc >= 0 && rc < nr_pages) {
                        /* Some pages were pinned, release these */
                        for (i = 0; i < rc; i++)
                                page_cache_release(pages[i]);
                        rc = -ENOMEM;
                        goto out_free;
                }
        } else {
                rc = sgl_fill_kernel_pages(pages, uaddr, nr_pages, rw);
        }

        if (rc < 0)
                /* We completely failed to get the pages */
                goto out_free;

        /* Populate the scatter/gather list */
        sg_init_table(sgl, nr_pages);

        /* Take a shortcut here when we only have a single page transfer */
        if (nr_pages > 1) {
                unsigned int off = offset_in_page(uaddr);
                unsigned int len = PAGE_SIZE - off;

                sg_set_page(&sgl[0], pages[0], len, off);
                length -= len;

                for (i = 1; i < nr_pages; i++) {
                        sg_set_page(&sgl[i], pages[i],
                                    (length < PAGE_SIZE) ? length : PAGE_SIZE,
                                    0);
                        length -= PAGE_SIZE;
                }
        } else
                sg_set_page(&sgl[0], pages[0], length, offset_in_page(uaddr));

out_free:
        /* We do not need the pages array anymore */
        kfree(pages);

        return nr_pages;
}
/**
 * sgl_unmap_user_pages() - Release the scatter gather list pages
 * @sgl: The scatter gather list
 * @nr_pages: Number of pages in the list
 * @dirty: Flag indicating whether the pages should be marked dirty
 * @to_user: 1 when transfer is to/from user-space (0 for to/from kernel)
 */
static void sgl_unmap_user_pages(struct scatterlist *sgl,
                                 const unsigned int nr_pages, int dirty,
                                 int to_user)
{
        int i;

        if (!to_user)
                return;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = sg_page(&sgl[i]);

                if (dirty && !PageReserved(page))
                        SetPageDirty(page);

                page_cache_release(page);
        }
}
/**
 * vme_dma_setup() - Setup a DMA transfer
 * @channel: DMA channel to setup
 * @to_user: 1 if the transfer is to/from a user-space buffer.
 *           0 if it is to/from a kernel buffer.
 *
 * Setup a DMA transfer.
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
static int vme_dma_setup(struct dma_channel *channel, int to_user)
{
        int rc = 0;
        struct vme_dma *desc = &channel->desc;
        unsigned int length = desc->length;
        unsigned int uaddr;
        int nr_pages;

        /* Create the scatter gather list */
        uaddr = (desc->dir == VME_DMA_TO_DEVICE) ?
                desc->src.addrl : desc->dst.addrl;

        /* Check for overflow */
        if ((uaddr + length) < uaddr)
                return -EINVAL;

        nr_pages = ((uaddr & ~PAGE_MASK) + length + ~PAGE_MASK) >> PAGE_SHIFT;

        if ((channel->sgl = kmalloc(nr_pages * sizeof(struct scatterlist),
                                    GFP_KERNEL)) == NULL)
                return -ENOMEM;

        /* Map the user pages into the scatter gather list */
        channel->sg_pages = sgl_map_user_pages(channel->sgl, nr_pages, uaddr,
                                               length,
                                               (desc->dir == VME_DMA_FROM_DEVICE),
                                               to_user);

        if (channel->sg_pages <= 0) {
                rc = channel->sg_pages;
                goto out_free_sgl;
        }

        /* Map the sg list entries onto the PCI bus */
        channel->sg_mapped = pci_map_sg(vme_bridge->pdev, channel->sgl,
                                        channel->sg_pages, desc->dir);

        rc = tsi148_dma_setup(channel);

        if (rc)
                goto out_unmap_sgl;

        return 0;

out_unmap_sgl:
        pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
                     desc->dir);

        sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

out_free_sgl:
        kfree(channel->sgl);

        return rc;
}
/**
 * vme_dma_start() - Start a DMA transfer
 * @channel: DMA channel to start
 */
static void vme_dma_start(struct dma_channel *channel)
{
        /* Not much to do here */
        tsi148_dma_start(channel);
}
/* This function has to be called with dma_semaphore and dma_lock held. */
static struct dma_channel *__lock_avail_channel(void)
{
        struct dma_channel *channel;
        int i;

        for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
                channel = &channels[i];

                if (!channel->busy) {
                        channel->busy = 1;
                        return channel;
                }
        }

        WARN_ON_ONCE(i == TSI148_NUM_DMA_CHANNELS);
        return ERR_PTR(-EDEADLK);
}
/*
 * Wait in the queue of the semaphore for an available channel. Then find
 * this newly available channel, and acquire it by flagging it as busy.
 */
static struct dma_channel *vme_dma_channel_acquire(void)
{
        struct dma_channel *channel;
        int rc;

        /* do not process any requests if dma_disable is set */
        if (atomic_read(&dma_disable))
                return ERR_PTR(-EBUSY);

        /* wait for a channel to be available */
        rc = down_interruptible(&dma_semaphore);
        if (rc)
                return ERR_PTR(rc);

        /*
         * dma_disable might have been flagged while this task was
         * sleeping on dma_semaphore.
         */
        if (atomic_read(&dma_disable)) {
                up(&dma_semaphore);
                return ERR_PTR(-EBUSY);
        }

        /* find the available channel */
        mutex_lock(&dma_lock);
        channel = __lock_avail_channel();
        mutex_unlock(&dma_lock);

        return channel;
}
static void vme_dma_channel_release(struct dma_channel *channel)
{
        /* release the channel busy flag */
        mutex_lock(&dma_lock);
        channel->busy = 0;
        mutex_unlock(&dma_lock);

        /* up the DMA semaphore to mark there's a channel available */
        up(&dma_semaphore);
}
/*
 * @to_user: 1 - the transfer is to/from a user-space buffer
 *           0 - the transfer is to/from a kernel buffer
 */
static int __vme_do_dma(struct vme_dma *desc, int to_user)
{
        int rc = 0;
        struct dma_channel *channel;

        /* First check the transfer length */
        if (!desc->length) {
                printk(KERN_ERR PFX "%s: Wrong length %d\n",
                       __func__, desc->length);
                return -EINVAL;
        }

        /* Check the transfer direction validity */
        if ((desc->dir != VME_DMA_FROM_DEVICE) &&
            (desc->dir != VME_DMA_TO_DEVICE)) {
                printk(KERN_ERR PFX "%s: Wrong direction %d\n",
                       __func__, desc->dir);
                return -EINVAL;
        }

        /* Check we're within a 32-bit address space */
        if (desc->src.addru || desc->dst.addru) {
                printk(KERN_ERR PFX "%s: Addresses are not 32-bit\n", __func__);
                return -EINVAL;
        }

        /* Acquire an available channel */
        channel = vme_dma_channel_acquire();
        if (IS_ERR(channel))
                return PTR_ERR(channel);

        memcpy(&channel->desc, desc, sizeof(struct vme_dma));

        /* Setup the DMA transfer */
        rc = vme_dma_setup(channel, to_user);

        if (rc)
                goto out_release_channel;

        /* Start the DMA transfer */
        vme_dma_start(channel);

        /* Wait for DMA completion */
        rc = wait_event_interruptible(channel->wait,
                                      !tsi148_dma_busy(channel));

        /* React to user-space signals by aborting the ongoing DMA transfer */
        if (rc) {
                tsi148_dma_abort(channel);
                /* leave some time for the bridge to clear the DMA channel */
                udelay(10);
        }

        desc->status = tsi148_dma_get_status(channel);

        /* Now do some cleanup and we're done */
        tsi148_dma_release(channel);

        pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
                     desc->dir);

        sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

        kfree(channel->sgl);

out_release_channel:
        vme_dma_channel_release(channel);

        /* Signal we're done in case we're in module exit */
        wake_up(&channel_wait[channel->num]);

        return rc;
}
/**
 * vme_do_dma() - Do a DMA transfer
 * @desc: DMA transfer descriptor
 *
 * This function first checks the validity of the user supplied DMA transfer
 * parameters. It then tries to find an available DMA channel to do the
 * transfer, sets up that channel and starts the DMA.
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
int vme_do_dma(struct vme_dma *desc)
{
        return __vme_do_dma(desc, 1);
}
EXPORT_SYMBOL_GPL(vme_do_dma);
/**
 * vme_do_dma_kernel() - Do a DMA transfer to/from a kernel buffer
 * @desc: DMA transfer descriptor
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
int vme_do_dma_kernel(struct vme_dma *desc)
{
        return __vme_do_dma(desc, 0);
}
EXPORT_SYMBOL_GPL(vme_do_dma_kernel);
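
/*
 * Illustrative sketch (not part of the original driver): how a client module
 * might perform a DMA read into a kernel buffer. Only the struct vme_dma
 * fields used in this file are shown; the VME source address and any further
 * fields required by vmebus.h (address modifier, data width, ...) are assumed
 * to be filled in by the caller. The buffer must lie in lowmem, as required
 * by sgl_fill_kernel_pages(), and its address must fit in 32 bits.
 *
 *      static int fetch_block(void *buf, unsigned int len)
 *      {
 *              struct vme_dma desc;
 *
 *              memset(&desc, 0, sizeof(desc));
 *              desc.dir = VME_DMA_FROM_DEVICE;
 *              desc.length = len;
 *              desc.dst.addrl = (unsigned int)(unsigned long)buf;
 *              desc.dst.addru = 0;
 *              ... fill in desc.src with the VME source address ...
 *
 *              return vme_do_dma_kernel(&desc);
 *      }
 */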
/**
 * vme_dma_ioctl() - ioctl file method for the VME DMA device
 * @file: Device file descriptor
 * @cmd: ioctl number
 * @arg: ioctl argument
 *
 * Currently the VME DMA device supports the following ioctl:
 *
 * VME_IOCTL_START_DMA
 */
long vme_dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int rc = 0;
        struct vme_dma desc;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case VME_IOCTL_START_DMA:
                /* Get the DMA transfer descriptor */
                if (copy_from_user(&desc, (void *)argp, sizeof(struct vme_dma)))
                        return -EFAULT;

                /* Do the DMA */
                rc = vme_do_dma(&desc);

                if (rc)
                        return rc;

                /*
                 * Copy back the DMA transfer descriptor containing the
                 * updated DMA status.
                 */
                if (copy_to_user((void *)argp, &desc, sizeof(struct vme_dma)))
                        return -EFAULT;

                break;

        default:
                rc = -ENOIOCTLCMD;
        }

        return rc;
}
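
/*
 * Illustrative sketch (not part of the original driver): starting a DMA
 * transfer from user space with VME_IOCTL_START_DMA. The device node name is
 * an assumption; use whatever node the bridge driver actually creates for its
 * DMA device. Only the struct vme_dma fields referenced in this file are
 * shown; further fields required by vmebus.h are assumed to be filled in as
 * needed. The updated DMA status is returned in desc.status.
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include "vmebus.h"
 *
 *      int dma_read(void *buf, unsigned int len)
 *      {
 *              struct vme_dma desc;
 *              int fd, rc;
 *
 *              fd = open("/dev/vme_dma", O_RDWR);      (assumed node name)
 *              if (fd < 0)
 *                      return -1;
 *
 *              memset(&desc, 0, sizeof(desc));
 *              desc.dir = VME_DMA_FROM_DEVICE;
 *              desc.length = len;
 *              desc.dst.addrl = (unsigned long)buf;    (must be a 32-bit address)
 *              ... fill in desc.src with the VME source address ...
 *
 *              rc = ioctl(fd, VME_IOCTL_START_DMA, &desc);
 *              close(fd);
 *              return rc;
 *      }
 */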
/**
 * vme_dma_exit() - Release DMA management resources
 */
void __devexit vme_dma_exit(void)
{
        int i;

        /* do not perform any further DMA operations */
        atomic_set(&dma_disable, 1);

        /* abort all the in flight DMA operations */
        for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
                tsi148_dma_abort(&channels[i]);
        }

        /* wait until all the channels are idle */
        for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
                down(&dma_semaphore);
                up(&dma_semaphore);
        }

        tsi148_dma_exit();
}
/**
 * vme_dma_init() - Initialize DMA management
 */
int __devinit vme_dma_init(void)
{
        int i;

        for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
                channels[i].num = i;
                init_waitqueue_head(&channels[i].wait);
                init_waitqueue_head(&channel_wait[i]);
                INIT_LIST_HEAD(&channels[i].hw_desc_list);
        }

        sema_init(&dma_semaphore, TSI148_NUM_DMA_CHANNELS);
        mutex_init(&dma_lock);
        atomic_set(&dma_disable, 0);

        return tsi148_dma_init();
}