/*
 * vme_dma.c - PCI-VME bridge DMA management
 *
 * Copyright (c) 2009 Sebastien Dugue
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file provides the PCI-VME bridge DMA management code.
 */
#include <linux/pagemap.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif

#include <asm/atomic.h>

#include "vme_bridge.h"
struct dma_channel channels[TSI148_NUM_DMA_CHANNELS];

/*
 * @dma_semaphore manages the common queue to access all the DMA channels.
 * Once a process gets through the semaphore, it must acquire the
 * dma_lock mutex to atomically look for an available channel.
 * The @dma_disable flag can be set to disable any further DMA transfers.
 */
static struct compat_semaphore dma_semaphore;
static struct mutex dma_lock;
static atomic_t dma_disable;
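/*
 * A sketch of the resulting acquire/release sequence, as implemented by
 * vme_dma_channel_acquire() and vme_dma_channel_release() below (error
 * paths omitted):
 *
 *	down_interruptible(&dma_semaphore);	wait for a free channel
 *	mutex_lock(&dma_lock);
 *	channel = __lock_avail_channel();	scan channels[] for !busy
 *	mutex_unlock(&dma_lock);
 *	... perform the DMA transfer ...
 *	mutex_lock(&dma_lock);
 *	channel->busy = 0;
 *	mutex_unlock(&dma_lock);
 *	up(&dma_semaphore);
 */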
/*
 * Used for synchronizing between a DMA transfer using a channel and
 * module exit.
 */
wait_queue_head_t channel_wait[TSI148_NUM_DMA_CHANNELS];
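/*
 * handle_dma_interrupt() below is called with a mask of the channels
 * whose transfer completed; judging from the tests in the body, bit n
 * set means channel n is done (e.g. a mask of 0x3 reports both
 * channels in a single call).
 */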
void handle_dma_interrupt(int channel_mask)
{
	if (channel_mask & 1)
		wake_up(&channels[0].wait);

	if (channel_mask & 2)
		wake_up(&channels[1].wait);

	account_dma_interrupt(channel_mask);
}
static int sgl_fill_user_pages(struct page **pages, unsigned long uaddr,
			       const unsigned int nr_pages, int rw)
{
	int ret;

	/* Get user pages for the DMA transfer */
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, nr_pages, rw, 0,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);

	return ret;
}
static int sgl_fill_kernel_pages(struct page **pages, unsigned long kaddr,
				 const unsigned int nr_pages, int rw)
{
	int i;

	/* Note: this supports lowmem pages only */
	if (!virt_addr_valid(kaddr))
		return -EINVAL;

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page(kaddr + PAGE_SIZE * i);

	return nr_pages;
}
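/*
 * On the lowmem restriction above: virt_addr_valid() only accepts
 * addresses in the kernel's linear mapping, so a kmalloc()'d buffer
 * passes while a vmalloc()'d one (not linearly mapped) is rejected
 * with -EINVAL before any virt_to_page() translation is attempted.
 */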
/**
 * sgl_map_user_pages() - Pin user pages and put them into a scatter gather list
 * @sgl: Scatter gather list to fill
 * @nr_pages: Number of pages
 * @uaddr: User buffer address
 * @length: Length of user buffer
 * @rw: Direction (0 = read from userspace, 1 = write to userspace)
 * @to_user: 1 - transfer is to/from a user-space buffer. 0 - kernel buffer.
 *
 * This function pins the pages of the user-space buffer and fills in the
 * scatter gather list.
 */
static int sgl_map_user_pages(struct scatterlist *sgl,
			      const unsigned int nr_pages, unsigned long uaddr,
			      size_t length, int rw, int to_user)
{
	int rc;
	int i;
	struct page **pages;

	if ((pages = kmalloc(nr_pages * sizeof(struct page *),
			     GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if (to_user) {
		rc = sgl_fill_user_pages(pages, uaddr, nr_pages, rw);
		if (rc >= 0 && rc < nr_pages) {
			/* Some pages were pinned, release these */
			for (i = 0; i < rc; i++)
				page_cache_release(pages[i]);
			rc = -ENOMEM;
			goto out_free;
		}
	} else {
		rc = sgl_fill_kernel_pages(pages, uaddr, nr_pages, rw);
	}
	if (rc < 0)
		/* We completely failed to get the pages */
		goto out_free;

	/* Populate the scatter/gather list */
	sg_init_table(sgl, nr_pages);

	/* Take a shortcut here when we only have a single page transfer */
	if (nr_pages > 1) {
		unsigned int off = offset_in_page(uaddr);
		unsigned int len = PAGE_SIZE - off;

		sg_set_page(&sgl[0], pages[0], len, off);
		length -= len;

		for (i = 1; i < nr_pages; i++) {
			sg_set_page(&sgl[i], pages[i],
				    (length < PAGE_SIZE) ? length : PAGE_SIZE,
				    0);
			length -= PAGE_SIZE;
		}
	} else {
		sg_set_page(&sgl[0], pages[0], length, offset_in_page(uaddr));
	}
	/* We do not need the pages array anymore */
out_free:
	kfree(pages);

	return rc;
}
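/*
 * Worked example for the list built above: pinning a 10000-byte user
 * buffer that starts at offset 0x100 into its first page (4 KiB pages)
 * spans three pages, so the entries get lengths of 4096 - 0x100 = 3840,
 * 4096, and 10000 - 3840 - 4096 = 2064 bytes; only the first entry
 * carries a non-zero in-page offset. (Numbers are illustrative only.)
 */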
/**
 * sgl_unmap_user_pages() - Release the scatter gather list pages
 * @sgl: The scatter gather list
 * @nr_pages: Number of pages in the list
 * @dirty: Flag indicating whether the pages should be marked dirty
 * @to_user: 1 when transfer is to/from user-space (0 for to/from kernel)
 */
static void sgl_unmap_user_pages(struct scatterlist *sgl,
				 const unsigned int nr_pages, int dirty,
				 int to_user)
{
	int i;

	/* Kernel pages were not pinned, so there is nothing to release */
	if (!to_user)
		return;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = sg_page(&sgl[i]);

		if (dirty && !PageReserved(page))
			SetPageDirty(page);

		page_cache_release(page);
	}
}
/**
 * vme_dma_setup() - Setup a DMA transfer
 * @channel: DMA channel to setup
 * @to_user: 1 if the transfer is to/from a user-space buffer.
 *           0 if it is to/from a kernel buffer.
 *
 * Setup a DMA transfer.
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
static int vme_dma_setup(struct dma_channel *channel, int to_user)
{
	int rc = 0;
	struct vme_dma *desc = &channel->desc;
	unsigned int length = desc->length;
	unsigned int uaddr;
	int nr_pages;

	/* Create the scatter gather list */
	uaddr = (desc->dir == VME_DMA_TO_DEVICE) ?
		desc->src.addrl : desc->dst.addrl;
	/* Check for overflow */
	if ((uaddr + length) < uaddr)
		return -EINVAL;

	nr_pages = ((uaddr & ~PAGE_MASK) + length + ~PAGE_MASK) >> PAGE_SHIFT;
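	/*
	 * The expression above rounds the buffer span up to whole pages:
	 * with 4 KiB pages ~PAGE_MASK is 0xfff, so e.g. uaddr = 0x1100
	 * and length = 0x2000 give (0x100 + 0x2000 + 0xfff) >> 12 = 3,
	 * matching the three page frames the buffer straddles.
	 */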
	if ((channel->sgl = kmalloc(nr_pages * sizeof(struct scatterlist),
				    GFP_KERNEL)) == NULL)
		return -ENOMEM;

	/* Map the user pages into the scatter gather list */
	channel->sg_pages = sgl_map_user_pages(channel->sgl, nr_pages, uaddr,
					       length,
					       (desc->dir == VME_DMA_FROM_DEVICE),
					       to_user);

	if (channel->sg_pages <= 0) {
		rc = channel->sg_pages;
		goto out_free_sgl;
	}

	/* Map the sg list entries onto the PCI bus */
	channel->sg_mapped = pci_map_sg(vme_bridge->pdev, channel->sgl,
					channel->sg_pages, desc->dir);

	rc = tsi148_dma_setup(channel);
	if (rc)
		goto out_unmap_sgl;

	return 0;

out_unmap_sgl:
	pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
		     desc->dir);

	sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

out_free_sgl:
	kfree(channel->sgl);

	return rc;
}
/**
 * vme_dma_start() - Start a DMA transfer
 * @channel: DMA channel to start
 */
static void vme_dma_start(struct dma_channel *channel)
{
	/* Not much to do here */
	tsi148_dma_start(channel);
}
/* This function has to be called with dma_semaphore and dma_lock held. */
static struct dma_channel *__lock_avail_channel(void)
{
	struct dma_channel *channel;
	int i;

	for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
		channel = &channels[i];

		if (!channel->busy) {
			channel->busy = 1;
			return channel;
		}
	}

	/* The semaphore guarantees that a channel must be available here */
	WARN_ON_ONCE(i == TSI148_NUM_DMA_CHANNELS);
	return ERR_PTR(-EDEADLK);
}
/*
 * Wait in the queue of the semaphore for an available channel. Then find
 * this newly available channel, and acquire it by flagging it as busy.
 */
static struct dma_channel *vme_dma_channel_acquire(void)
{
	struct dma_channel *channel;
	int rc;

	/* do not process any requests if dma_disable is set */
	if (atomic_read(&dma_disable))
		return ERR_PTR(-EBUSY);

	/* wait for a channel to be available */
	rc = down_interruptible(&dma_semaphore);
	if (rc)
		return ERR_PTR(rc);

	/*
	 * dma_disable might have been flagged while this task was
	 * sleeping on dma_semaphore.
	 */
	if (atomic_read(&dma_disable)) {
		up(&dma_semaphore);
		return ERR_PTR(-EBUSY);
	}

	/* find the available channel */
	mutex_lock(&dma_lock);
	channel = __lock_avail_channel();
	mutex_unlock(&dma_lock);

	return channel;
}
static void vme_dma_channel_release(struct dma_channel *channel)
{
	/* release the channel busy flag */
	mutex_lock(&dma_lock);
	channel->busy = 0;
	mutex_unlock(&dma_lock);

	/* up the DMA semaphore to mark there's a channel available */
	up(&dma_semaphore);
}
/*
 * __vme_do_dma() - Do a DMA transfer
 * @desc: DMA transfer descriptor
 * @to_user: 1 - the transfer is to/from a user-space buffer
 *           0 - the transfer is to/from a kernel buffer
 */
static int __vme_do_dma(struct vme_dma *desc, int to_user)
{
	int rc = 0;
	struct dma_channel *channel;
	/* First check the transfer length */
	if (!desc->length) {
		printk(KERN_ERR PFX "%s: Wrong length %d\n",
		       __func__, desc->length);
		return -EINVAL;
	}

	/* Check the transfer direction validity */
	if ((desc->dir != VME_DMA_FROM_DEVICE) &&
	    (desc->dir != VME_DMA_TO_DEVICE)) {
		printk(KERN_ERR PFX "%s: Wrong direction %d\n",
		       __func__, desc->dir);
		return -EINVAL;
	}

	/* Check we're within a 32-bit address space */
	if (desc->src.addru || desc->dst.addru) {
		printk(KERN_ERR PFX "%s: Addresses are not 32-bit\n", __func__);
		return -EINVAL;
	}
	/* Acquire an available channel */
	channel = vme_dma_channel_acquire();
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	memcpy(&channel->desc, desc, sizeof(struct vme_dma));

	/* Setup the DMA transfer */
	rc = vme_dma_setup(channel, to_user);
	if (rc)
		goto out_release_channel;
	/* Start the DMA transfer */
	vme_dma_start(channel);

	/* Wait for DMA completion */
	rc = wait_event_interruptible(channel->wait,
				      !tsi148_dma_busy(channel));

	/* React to user-space signals by aborting the ongoing DMA transfer */
	if (rc) {
		tsi148_dma_abort(channel);
		/* leave some time for the bridge to clear the DMA channel */
		udelay(10);	/* the exact delay length here is an assumption */
	}

	desc->status = tsi148_dma_get_status(channel);
	/* Now do some cleanup and we're done */
	tsi148_dma_release(channel);

	pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
		     desc->dir);

	sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

	kfree(channel->sgl);

out_release_channel:
	vme_dma_channel_release(channel);

	/* Signal we're done in case we're in module exit */
	wake_up(&channel_wait[channel->num]);

	return rc;
}
/**
 * vme_do_dma() - Do a DMA transfer
 * @desc: DMA transfer descriptor
 *
 * This function first checks the validity of the user supplied DMA transfer
 * parameters. It then tries to find an available DMA channel to do the
 * transfer, sets up that channel and starts the DMA.
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
int vme_do_dma(struct vme_dma *desc)
{
	return __vme_do_dma(desc, 1);
}
EXPORT_SYMBOL_GPL(vme_do_dma);
/**
 * vme_do_dma_kernel() - Do a DMA transfer to/from a kernel buffer
 * @desc: DMA transfer descriptor
 *
 * Returns 0 on success, or a standard kernel error code on failure.
 */
int vme_do_dma_kernel(struct vme_dma *desc)
{
	return __vme_do_dma(desc, 0);
}
EXPORT_SYMBOL_GPL(vme_do_dma_kernel);
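/*
 * A minimal usage sketch for the kernel-buffer variant above, using only
 * the struct vme_dma fields this file references (dir, length, the
 * src/dst address words and status); vme_addr and buf are placeholders,
 * and buf must be a lowmem (e.g. kmalloc'd) buffer:
 *
 *	struct vme_dma desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.dir = VME_DMA_FROM_DEVICE;
 *	desc.length = 4096;
 *	desc.src.addrl = vme_addr;		VME source address
 *	desc.dst.addrl = (unsigned int)buf;	kernel virtual address
 *
 *	rc = vme_do_dma_kernel(&desc);
 *	if (rc == 0)
 *		... desc.status now holds the bridge DMA status ...
 */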
/**
 * vme_dma_ioctl() - ioctl file method for the VME DMA device
 * @file: Device file descriptor
 * @cmd: ioctl number
 * @arg: ioctl argument
 *
 * Currently the VME DMA device supports the following ioctl:
 *
 *   VME_IOCTL_START_DMA
 */
long vme_dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int rc = 0;
	struct vme_dma desc;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case VME_IOCTL_START_DMA:
		/* Get the DMA transfer descriptor */
		if (copy_from_user(&desc, argp, sizeof(struct vme_dma)))
			return -EFAULT;

		/* Do the DMA */
		rc = vme_do_dma(&desc);
		if (rc)
			return rc;

		/*
		 * Copy back the DMA transfer descriptor containing the
		 * updated DMA status.
		 */
		if (copy_to_user(argp, &desc, sizeof(struct vme_dma)))
			return -EFAULT;

		break;

	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
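/*
 * A minimal user-space sketch of the ioctl above (the device node path
 * is illustrative; the real name depends on how the driver registers
 * its device):
 *
 *	int fd = open("/dev/vme_dma", O_RDWR);
 *	struct vme_dma desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.dir = VME_DMA_FROM_DEVICE;
 *	desc.length = length;
 *	desc.src.addrl = vme_addr;			VME source address
 *	desc.dst.addrl = (unsigned long)buffer;		user buffer
 *
 *	if (ioctl(fd, VME_IOCTL_START_DMA, &desc) < 0)
 *		...handle errno...
 *	else
 *		...desc.status holds the updated DMA status...
 */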
/**
 * vme_dma_exit() - Release DMA management resources
 */
void __devexit vme_dma_exit(void)
{
	int i;

	/* do not perform any further DMA operations */
	atomic_set(&dma_disable, 1);

	/* abort all the in-flight DMA operations */
	for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
		tsi148_dma_abort(&channels[i]);
	}

	/* wait until all the channels are idle */
	for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
		down(&dma_semaphore);
	}
}
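/*
 * On the drain loop above: dma_semaphore was initialised to
 * TSI148_NUM_DMA_CHANNELS and every transfer in flight holds one count,
 * so once the loop has downed the semaphore once per channel no
 * transfer can still own a channel; with dma_disable already set, no
 * new transfer can slip in afterwards.
 */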
/**
 * vme_dma_init() - Initialize DMA management
 */
int __devinit vme_dma_init(void)
{
	int i;

	for (i = 0; i < TSI148_NUM_DMA_CHANNELS; i++) {
		channels[i].num = i;
		init_waitqueue_head(&channels[i].wait);
		init_waitqueue_head(&channel_wait[i]);
		INIT_LIST_HEAD(&channels[i].hw_desc_list);
	}

	sema_init(&dma_semaphore, TSI148_NUM_DMA_CHANNELS);
	mutex_init(&dma_lock);
	atomic_set(&dma_disable, 0);

	return tsi148_dma_init();
}