4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/vme.h>
35 #include "vme_bridge.h"
37 /* Bitmask and list of registered buses both protected by common mutex */
38 static unsigned int vme_bus_numbers
;
39 static LIST_HEAD(vme_bus_list
);
40 static DEFINE_MUTEX(vme_buses_lock
);
42 static void __exit
vme_exit(void);
43 static int __init
vme_init(void);
45 static struct vme_dev
*dev_to_vme_dev(struct device
*dev
)
47 return container_of(dev
, struct vme_dev
, dev
);
51 * Find the bridge that the resource is associated with.
53 static struct vme_bridge
*find_bridge(struct vme_resource
*resource
)
55 /* Get list to search */
56 switch (resource
->type
) {
58 return list_entry(resource
->entry
, struct vme_master_resource
,
62 return list_entry(resource
->entry
, struct vme_slave_resource
,
66 return list_entry(resource
->entry
, struct vme_dma_resource
,
70 return list_entry(resource
->entry
, struct vme_lm_resource
,
74 printk(KERN_ERR
"Unknown resource type\n");
81 * Allocate a contiguous block of memory for use by the driver. This is used to
82 * create the buffers for the slave windows.
84 void *vme_alloc_consistent(struct vme_resource
*resource
, size_t size
,
87 struct vme_bridge
*bridge
;
89 if (resource
== NULL
) {
90 printk(KERN_ERR
"No resource\n");
94 bridge
= find_bridge(resource
);
96 printk(KERN_ERR
"Can't find bridge\n");
100 if (bridge
->parent
== NULL
) {
101 printk(KERN_ERR
"Dev entry NULL for bridge %s\n", bridge
->name
);
105 if (bridge
->alloc_consistent
== NULL
) {
106 printk(KERN_ERR
"alloc_consistent not supported by bridge %s\n",
111 return bridge
->alloc_consistent(bridge
->parent
, size
, dma
);
113 EXPORT_SYMBOL(vme_alloc_consistent
);
116 * Free previously allocated contiguous block of memory.
118 void vme_free_consistent(struct vme_resource
*resource
, size_t size
,
119 void *vaddr
, dma_addr_t dma
)
121 struct vme_bridge
*bridge
;
123 if (resource
== NULL
) {
124 printk(KERN_ERR
"No resource\n");
128 bridge
= find_bridge(resource
);
129 if (bridge
== NULL
) {
130 printk(KERN_ERR
"Can't find bridge\n");
134 if (bridge
->parent
== NULL
) {
135 printk(KERN_ERR
"Dev entry NULL for bridge %s\n", bridge
->name
);
139 if (bridge
->free_consistent
== NULL
) {
140 printk(KERN_ERR
"free_consistent not supported by bridge %s\n",
145 bridge
->free_consistent(bridge
->parent
, size
, vaddr
, dma
);
147 EXPORT_SYMBOL(vme_free_consistent
);
149 size_t vme_get_size(struct vme_resource
*resource
)
152 unsigned long long base
, size
;
154 u32 aspace
, cycle
, dwidth
;
156 switch (resource
->type
) {
158 retval
= vme_master_get(resource
, &enabled
, &base
, &size
,
159 &aspace
, &cycle
, &dwidth
);
164 retval
= vme_slave_get(resource
, &enabled
, &base
, &size
,
165 &buf_base
, &aspace
, &cycle
);
173 printk(KERN_ERR
"Unknown resource type\n");
178 EXPORT_SYMBOL(vme_get_size
);
180 int vme_check_window(u32 aspace
, unsigned long long vme_base
,
181 unsigned long long size
)
187 if (((vme_base
+ size
) > VME_A16_MAX
) ||
188 (vme_base
> VME_A16_MAX
))
192 if (((vme_base
+ size
) > VME_A24_MAX
) ||
193 (vme_base
> VME_A24_MAX
))
197 if (((vme_base
+ size
) > VME_A32_MAX
) ||
198 (vme_base
> VME_A32_MAX
))
202 if ((size
!= 0) && (vme_base
> U64_MAX
+ 1 - size
))
206 if (((vme_base
+ size
) > VME_CRCSR_MAX
) ||
207 (vme_base
> VME_CRCSR_MAX
))
217 printk(KERN_ERR
"Invalid address space\n");
224 EXPORT_SYMBOL(vme_check_window
);
227 * Request a slave image with specific attributes, return some unique
230 struct vme_resource
*vme_slave_request(struct vme_dev
*vdev
, u32 address
,
233 struct vme_bridge
*bridge
;
234 struct list_head
*slave_pos
= NULL
;
235 struct vme_slave_resource
*allocated_image
= NULL
;
236 struct vme_slave_resource
*slave_image
= NULL
;
237 struct vme_resource
*resource
= NULL
;
239 bridge
= vdev
->bridge
;
240 if (bridge
== NULL
) {
241 printk(KERN_ERR
"Can't find VME bus\n");
245 /* Loop through slave resources */
246 list_for_each(slave_pos
, &bridge
->slave_resources
) {
247 slave_image
= list_entry(slave_pos
,
248 struct vme_slave_resource
, list
);
250 if (slave_image
== NULL
) {
251 printk(KERN_ERR
"Registered NULL Slave resource\n");
255 /* Find an unlocked and compatible image */
256 mutex_lock(&slave_image
->mtx
);
257 if (((slave_image
->address_attr
& address
) == address
) &&
258 ((slave_image
->cycle_attr
& cycle
) == cycle
) &&
259 (slave_image
->locked
== 0)) {
261 slave_image
->locked
= 1;
262 mutex_unlock(&slave_image
->mtx
);
263 allocated_image
= slave_image
;
266 mutex_unlock(&slave_image
->mtx
);
270 if (allocated_image
== NULL
)
273 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
274 if (resource
== NULL
) {
275 printk(KERN_WARNING
"Unable to allocate resource structure\n");
278 resource
->type
= VME_SLAVE
;
279 resource
->entry
= &allocated_image
->list
;
285 mutex_lock(&slave_image
->mtx
);
286 slave_image
->locked
= 0;
287 mutex_unlock(&slave_image
->mtx
);
292 EXPORT_SYMBOL(vme_slave_request
);
294 int vme_slave_set(struct vme_resource
*resource
, int enabled
,
295 unsigned long long vme_base
, unsigned long long size
,
296 dma_addr_t buf_base
, u32 aspace
, u32 cycle
)
298 struct vme_bridge
*bridge
= find_bridge(resource
);
299 struct vme_slave_resource
*image
;
302 if (resource
->type
!= VME_SLAVE
) {
303 printk(KERN_ERR
"Not a slave resource\n");
307 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
309 if (bridge
->slave_set
== NULL
) {
310 printk(KERN_ERR
"Function not supported\n");
314 if (!(((image
->address_attr
& aspace
) == aspace
) &&
315 ((image
->cycle_attr
& cycle
) == cycle
))) {
316 printk(KERN_ERR
"Invalid attributes\n");
320 retval
= vme_check_window(aspace
, vme_base
, size
);
324 return bridge
->slave_set(image
, enabled
, vme_base
, size
, buf_base
,
327 EXPORT_SYMBOL(vme_slave_set
);
329 int vme_slave_get(struct vme_resource
*resource
, int *enabled
,
330 unsigned long long *vme_base
, unsigned long long *size
,
331 dma_addr_t
*buf_base
, u32
*aspace
, u32
*cycle
)
333 struct vme_bridge
*bridge
= find_bridge(resource
);
334 struct vme_slave_resource
*image
;
336 if (resource
->type
!= VME_SLAVE
) {
337 printk(KERN_ERR
"Not a slave resource\n");
341 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
343 if (bridge
->slave_get
== NULL
) {
344 printk(KERN_ERR
"vme_slave_get not supported\n");
348 return bridge
->slave_get(image
, enabled
, vme_base
, size
, buf_base
,
351 EXPORT_SYMBOL(vme_slave_get
);
353 void vme_slave_free(struct vme_resource
*resource
)
355 struct vme_slave_resource
*slave_image
;
357 if (resource
->type
!= VME_SLAVE
) {
358 printk(KERN_ERR
"Not a slave resource\n");
362 slave_image
= list_entry(resource
->entry
, struct vme_slave_resource
,
364 if (slave_image
== NULL
) {
365 printk(KERN_ERR
"Can't find slave resource\n");
370 mutex_lock(&slave_image
->mtx
);
371 if (slave_image
->locked
== 0)
372 printk(KERN_ERR
"Image is already free\n");
374 slave_image
->locked
= 0;
375 mutex_unlock(&slave_image
->mtx
);
377 /* Free up resource memory */
380 EXPORT_SYMBOL(vme_slave_free
);
383 * Request a master image with specific attributes, return some unique
386 struct vme_resource
*vme_master_request(struct vme_dev
*vdev
, u32 address
,
387 u32 cycle
, u32 dwidth
)
389 struct vme_bridge
*bridge
;
390 struct list_head
*master_pos
= NULL
;
391 struct vme_master_resource
*allocated_image
= NULL
;
392 struct vme_master_resource
*master_image
= NULL
;
393 struct vme_resource
*resource
= NULL
;
395 bridge
= vdev
->bridge
;
396 if (bridge
== NULL
) {
397 printk(KERN_ERR
"Can't find VME bus\n");
401 /* Loop through master resources */
402 list_for_each(master_pos
, &bridge
->master_resources
) {
403 master_image
= list_entry(master_pos
,
404 struct vme_master_resource
, list
);
406 if (master_image
== NULL
) {
407 printk(KERN_WARNING
"Registered NULL master resource\n");
411 /* Find an unlocked and compatible image */
412 spin_lock(&master_image
->lock
);
413 if (((master_image
->address_attr
& address
) == address
) &&
414 ((master_image
->cycle_attr
& cycle
) == cycle
) &&
415 ((master_image
->width_attr
& dwidth
) == dwidth
) &&
416 (master_image
->locked
== 0)) {
418 master_image
->locked
= 1;
419 spin_unlock(&master_image
->lock
);
420 allocated_image
= master_image
;
423 spin_unlock(&master_image
->lock
);
426 /* Check to see if we found a resource */
427 if (allocated_image
== NULL
) {
428 printk(KERN_ERR
"Can't find a suitable resource\n");
432 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
433 if (resource
== NULL
) {
434 printk(KERN_ERR
"Unable to allocate resource structure\n");
437 resource
->type
= VME_MASTER
;
438 resource
->entry
= &allocated_image
->list
;
444 spin_lock(&master_image
->lock
);
445 master_image
->locked
= 0;
446 spin_unlock(&master_image
->lock
);
451 EXPORT_SYMBOL(vme_master_request
);
453 int vme_master_set(struct vme_resource
*resource
, int enabled
,
454 unsigned long long vme_base
, unsigned long long size
, u32 aspace
,
455 u32 cycle
, u32 dwidth
)
457 struct vme_bridge
*bridge
= find_bridge(resource
);
458 struct vme_master_resource
*image
;
461 if (resource
->type
!= VME_MASTER
) {
462 printk(KERN_ERR
"Not a master resource\n");
466 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
468 if (bridge
->master_set
== NULL
) {
469 printk(KERN_WARNING
"vme_master_set not supported\n");
473 if (!(((image
->address_attr
& aspace
) == aspace
) &&
474 ((image
->cycle_attr
& cycle
) == cycle
) &&
475 ((image
->width_attr
& dwidth
) == dwidth
))) {
476 printk(KERN_WARNING
"Invalid attributes\n");
480 retval
= vme_check_window(aspace
, vme_base
, size
);
484 return bridge
->master_set(image
, enabled
, vme_base
, size
, aspace
,
487 EXPORT_SYMBOL(vme_master_set
);
489 int vme_master_get(struct vme_resource
*resource
, int *enabled
,
490 unsigned long long *vme_base
, unsigned long long *size
, u32
*aspace
,
491 u32
*cycle
, u32
*dwidth
)
493 struct vme_bridge
*bridge
= find_bridge(resource
);
494 struct vme_master_resource
*image
;
496 if (resource
->type
!= VME_MASTER
) {
497 printk(KERN_ERR
"Not a master resource\n");
501 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
503 if (bridge
->master_get
== NULL
) {
504 printk(KERN_WARNING
"%s not supported\n", __func__
);
508 return bridge
->master_get(image
, enabled
, vme_base
, size
, aspace
,
511 EXPORT_SYMBOL(vme_master_get
);
514 * Read data out of VME space into a buffer.
516 ssize_t
vme_master_read(struct vme_resource
*resource
, void *buf
, size_t count
,
519 struct vme_bridge
*bridge
= find_bridge(resource
);
520 struct vme_master_resource
*image
;
523 if (bridge
->master_read
== NULL
) {
524 printk(KERN_WARNING
"Reading from resource not supported\n");
528 if (resource
->type
!= VME_MASTER
) {
529 printk(KERN_ERR
"Not a master resource\n");
533 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
535 length
= vme_get_size(resource
);
537 if (offset
> length
) {
538 printk(KERN_WARNING
"Invalid Offset\n");
542 if ((offset
+ count
) > length
)
543 count
= length
- offset
;
545 return bridge
->master_read(image
, buf
, count
, offset
);
548 EXPORT_SYMBOL(vme_master_read
);
551 * Write data out to VME space from a buffer.
553 ssize_t
vme_master_write(struct vme_resource
*resource
, void *buf
,
554 size_t count
, loff_t offset
)
556 struct vme_bridge
*bridge
= find_bridge(resource
);
557 struct vme_master_resource
*image
;
560 if (bridge
->master_write
== NULL
) {
561 printk(KERN_WARNING
"Writing to resource not supported\n");
565 if (resource
->type
!= VME_MASTER
) {
566 printk(KERN_ERR
"Not a master resource\n");
570 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
572 length
= vme_get_size(resource
);
574 if (offset
> length
) {
575 printk(KERN_WARNING
"Invalid Offset\n");
579 if ((offset
+ count
) > length
)
580 count
= length
- offset
;
582 return bridge
->master_write(image
, buf
, count
, offset
);
584 EXPORT_SYMBOL(vme_master_write
);
587 * Perform RMW cycle to provided location.
589 unsigned int vme_master_rmw(struct vme_resource
*resource
, unsigned int mask
,
590 unsigned int compare
, unsigned int swap
, loff_t offset
)
592 struct vme_bridge
*bridge
= find_bridge(resource
);
593 struct vme_master_resource
*image
;
595 if (bridge
->master_rmw
== NULL
) {
596 printk(KERN_WARNING
"Writing to resource not supported\n");
600 if (resource
->type
!= VME_MASTER
) {
601 printk(KERN_ERR
"Not a master resource\n");
605 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
607 return bridge
->master_rmw(image
, mask
, compare
, swap
, offset
);
609 EXPORT_SYMBOL(vme_master_rmw
);
611 int vme_master_mmap(struct vme_resource
*resource
, struct vm_area_struct
*vma
)
613 struct vme_master_resource
*image
;
614 phys_addr_t phys_addr
;
615 unsigned long vma_size
;
617 if (resource
->type
!= VME_MASTER
) {
618 pr_err("Not a master resource\n");
622 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
623 phys_addr
= image
->bus_resource
.start
+ (vma
->vm_pgoff
<< PAGE_SHIFT
);
624 vma_size
= vma
->vm_end
- vma
->vm_start
;
626 if (phys_addr
+ vma_size
> image
->bus_resource
.end
+ 1) {
627 pr_err("Map size cannot exceed the window size\n");
631 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
633 return vm_iomap_memory(vma
, phys_addr
, vma
->vm_end
- vma
->vm_start
);
635 EXPORT_SYMBOL(vme_master_mmap
);
637 void vme_master_free(struct vme_resource
*resource
)
639 struct vme_master_resource
*master_image
;
641 if (resource
->type
!= VME_MASTER
) {
642 printk(KERN_ERR
"Not a master resource\n");
646 master_image
= list_entry(resource
->entry
, struct vme_master_resource
,
648 if (master_image
== NULL
) {
649 printk(KERN_ERR
"Can't find master resource\n");
654 spin_lock(&master_image
->lock
);
655 if (master_image
->locked
== 0)
656 printk(KERN_ERR
"Image is already free\n");
658 master_image
->locked
= 0;
659 spin_unlock(&master_image
->lock
);
661 /* Free up resource memory */
664 EXPORT_SYMBOL(vme_master_free
);
667 * Request a DMA controller with specific attributes, return some unique
670 struct vme_resource
*vme_dma_request(struct vme_dev
*vdev
, u32 route
)
672 struct vme_bridge
*bridge
;
673 struct list_head
*dma_pos
= NULL
;
674 struct vme_dma_resource
*allocated_ctrlr
= NULL
;
675 struct vme_dma_resource
*dma_ctrlr
= NULL
;
676 struct vme_resource
*resource
= NULL
;
678 /* XXX Not checking resource attributes */
679 printk(KERN_ERR
"No VME resource Attribute tests done\n");
681 bridge
= vdev
->bridge
;
682 if (bridge
== NULL
) {
683 printk(KERN_ERR
"Can't find VME bus\n");
687 /* Loop through DMA resources */
688 list_for_each(dma_pos
, &bridge
->dma_resources
) {
689 dma_ctrlr
= list_entry(dma_pos
,
690 struct vme_dma_resource
, list
);
692 if (dma_ctrlr
== NULL
) {
693 printk(KERN_ERR
"Registered NULL DMA resource\n");
697 /* Find an unlocked and compatible controller */
698 mutex_lock(&dma_ctrlr
->mtx
);
699 if (((dma_ctrlr
->route_attr
& route
) == route
) &&
700 (dma_ctrlr
->locked
== 0)) {
702 dma_ctrlr
->locked
= 1;
703 mutex_unlock(&dma_ctrlr
->mtx
);
704 allocated_ctrlr
= dma_ctrlr
;
707 mutex_unlock(&dma_ctrlr
->mtx
);
710 /* Check to see if we found a resource */
711 if (allocated_ctrlr
== NULL
)
714 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
715 if (resource
== NULL
) {
716 printk(KERN_WARNING
"Unable to allocate resource structure\n");
719 resource
->type
= VME_DMA
;
720 resource
->entry
= &allocated_ctrlr
->list
;
726 mutex_lock(&dma_ctrlr
->mtx
);
727 dma_ctrlr
->locked
= 0;
728 mutex_unlock(&dma_ctrlr
->mtx
);
733 EXPORT_SYMBOL(vme_dma_request
);
738 struct vme_dma_list
*vme_new_dma_list(struct vme_resource
*resource
)
740 struct vme_dma_resource
*ctrlr
;
741 struct vme_dma_list
*dma_list
;
743 if (resource
->type
!= VME_DMA
) {
744 printk(KERN_ERR
"Not a DMA resource\n");
748 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
750 dma_list
= kmalloc(sizeof(struct vme_dma_list
), GFP_KERNEL
);
751 if (dma_list
== NULL
) {
752 printk(KERN_ERR
"Unable to allocate memory for new dma list\n");
755 INIT_LIST_HEAD(&dma_list
->entries
);
756 dma_list
->parent
= ctrlr
;
757 mutex_init(&dma_list
->mtx
);
761 EXPORT_SYMBOL(vme_new_dma_list
);
764 * Create "Pattern" type attributes
766 struct vme_dma_attr
*vme_dma_pattern_attribute(u32 pattern
, u32 type
)
768 struct vme_dma_attr
*attributes
;
769 struct vme_dma_pattern
*pattern_attr
;
771 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
772 if (attributes
== NULL
) {
773 printk(KERN_ERR
"Unable to allocate memory for attributes structure\n");
777 pattern_attr
= kmalloc(sizeof(struct vme_dma_pattern
), GFP_KERNEL
);
778 if (pattern_attr
== NULL
) {
779 printk(KERN_ERR
"Unable to allocate memory for pattern attributes\n");
783 attributes
->type
= VME_DMA_PATTERN
;
784 attributes
->private = (void *)pattern_attr
;
786 pattern_attr
->pattern
= pattern
;
787 pattern_attr
->type
= type
;
796 EXPORT_SYMBOL(vme_dma_pattern_attribute
);
799 * Create "PCI" type attributes
801 struct vme_dma_attr
*vme_dma_pci_attribute(dma_addr_t address
)
803 struct vme_dma_attr
*attributes
;
804 struct vme_dma_pci
*pci_attr
;
806 /* XXX Run some sanity checks here */
808 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
809 if (attributes
== NULL
) {
810 printk(KERN_ERR
"Unable to allocate memory for attributes structure\n");
814 pci_attr
= kmalloc(sizeof(struct vme_dma_pci
), GFP_KERNEL
);
815 if (pci_attr
== NULL
) {
816 printk(KERN_ERR
"Unable to allocate memory for pci attributes\n");
822 attributes
->type
= VME_DMA_PCI
;
823 attributes
->private = (void *)pci_attr
;
825 pci_attr
->address
= address
;
834 EXPORT_SYMBOL(vme_dma_pci_attribute
);
837 * Create "VME" type attributes
839 struct vme_dma_attr
*vme_dma_vme_attribute(unsigned long long address
,
840 u32 aspace
, u32 cycle
, u32 dwidth
)
842 struct vme_dma_attr
*attributes
;
843 struct vme_dma_vme
*vme_attr
;
845 attributes
= kmalloc(
846 sizeof(struct vme_dma_attr
), GFP_KERNEL
);
847 if (attributes
== NULL
) {
848 printk(KERN_ERR
"Unable to allocate memory for attributes structure\n");
852 vme_attr
= kmalloc(sizeof(struct vme_dma_vme
), GFP_KERNEL
);
853 if (vme_attr
== NULL
) {
854 printk(KERN_ERR
"Unable to allocate memory for vme attributes\n");
858 attributes
->type
= VME_DMA_VME
;
859 attributes
->private = (void *)vme_attr
;
861 vme_attr
->address
= address
;
862 vme_attr
->aspace
= aspace
;
863 vme_attr
->cycle
= cycle
;
864 vme_attr
->dwidth
= dwidth
;
873 EXPORT_SYMBOL(vme_dma_vme_attribute
);
878 void vme_dma_free_attribute(struct vme_dma_attr
*attributes
)
880 kfree(attributes
->private);
883 EXPORT_SYMBOL(vme_dma_free_attribute
);
885 int vme_dma_list_add(struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
886 struct vme_dma_attr
*dest
, size_t count
)
888 struct vme_bridge
*bridge
= list
->parent
->parent
;
891 if (bridge
->dma_list_add
== NULL
) {
892 printk(KERN_WARNING
"Link List DMA generation not supported\n");
896 if (!mutex_trylock(&list
->mtx
)) {
897 printk(KERN_ERR
"Link List already submitted\n");
901 retval
= bridge
->dma_list_add(list
, src
, dest
, count
);
903 mutex_unlock(&list
->mtx
);
907 EXPORT_SYMBOL(vme_dma_list_add
);
909 int vme_dma_list_exec(struct vme_dma_list
*list
)
911 struct vme_bridge
*bridge
= list
->parent
->parent
;
914 if (bridge
->dma_list_exec
== NULL
) {
915 printk(KERN_ERR
"Link List DMA execution not supported\n");
919 mutex_lock(&list
->mtx
);
921 retval
= bridge
->dma_list_exec(list
);
923 mutex_unlock(&list
->mtx
);
927 EXPORT_SYMBOL(vme_dma_list_exec
);
929 int vme_dma_list_free(struct vme_dma_list
*list
)
931 struct vme_bridge
*bridge
= list
->parent
->parent
;
934 if (bridge
->dma_list_empty
== NULL
) {
935 printk(KERN_WARNING
"Emptying of Link Lists not supported\n");
939 if (!mutex_trylock(&list
->mtx
)) {
940 printk(KERN_ERR
"Link List in use\n");
945 * Empty out all of the entries from the dma list. We need to go to the
946 * low level driver as dma entries are driver specific.
948 retval
= bridge
->dma_list_empty(list
);
950 printk(KERN_ERR
"Unable to empty link-list entries\n");
951 mutex_unlock(&list
->mtx
);
954 mutex_unlock(&list
->mtx
);
959 EXPORT_SYMBOL(vme_dma_list_free
);
961 int vme_dma_free(struct vme_resource
*resource
)
963 struct vme_dma_resource
*ctrlr
;
965 if (resource
->type
!= VME_DMA
) {
966 printk(KERN_ERR
"Not a DMA resource\n");
970 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
972 if (!mutex_trylock(&ctrlr
->mtx
)) {
973 printk(KERN_ERR
"Resource busy, can't free\n");
977 if (!(list_empty(&ctrlr
->pending
) && list_empty(&ctrlr
->running
))) {
978 printk(KERN_WARNING
"Resource still processing transfers\n");
979 mutex_unlock(&ctrlr
->mtx
);
985 mutex_unlock(&ctrlr
->mtx
);
991 EXPORT_SYMBOL(vme_dma_free
);
993 void vme_irq_handler(struct vme_bridge
*bridge
, int level
, int statid
)
995 void (*call
)(int, int, void *);
998 call
= bridge
->irq
[level
- 1].callback
[statid
].func
;
999 priv_data
= bridge
->irq
[level
- 1].callback
[statid
].priv_data
;
1002 call(level
, statid
, priv_data
);
1004 printk(KERN_WARNING
"Spurilous VME interrupt, level:%x, vector:%x\n",
1007 EXPORT_SYMBOL(vme_irq_handler
);
1009 int vme_irq_request(struct vme_dev
*vdev
, int level
, int statid
,
1010 void (*callback
)(int, int, void *),
1013 struct vme_bridge
*bridge
;
1015 bridge
= vdev
->bridge
;
1016 if (bridge
== NULL
) {
1017 printk(KERN_ERR
"Can't find VME bus\n");
1021 if ((level
< 1) || (level
> 7)) {
1022 printk(KERN_ERR
"Invalid interrupt level\n");
1026 if (bridge
->irq_set
== NULL
) {
1027 printk(KERN_ERR
"Configuring interrupts not supported\n");
1031 mutex_lock(&bridge
->irq_mtx
);
1033 if (bridge
->irq
[level
- 1].callback
[statid
].func
) {
1034 mutex_unlock(&bridge
->irq_mtx
);
1035 printk(KERN_WARNING
"VME Interrupt already taken\n");
1039 bridge
->irq
[level
- 1].count
++;
1040 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= priv_data
;
1041 bridge
->irq
[level
- 1].callback
[statid
].func
= callback
;
1043 /* Enable IRQ level */
1044 bridge
->irq_set(bridge
, level
, 1, 1);
1046 mutex_unlock(&bridge
->irq_mtx
);
1050 EXPORT_SYMBOL(vme_irq_request
);
1052 void vme_irq_free(struct vme_dev
*vdev
, int level
, int statid
)
1054 struct vme_bridge
*bridge
;
1056 bridge
= vdev
->bridge
;
1057 if (bridge
== NULL
) {
1058 printk(KERN_ERR
"Can't find VME bus\n");
1062 if ((level
< 1) || (level
> 7)) {
1063 printk(KERN_ERR
"Invalid interrupt level\n");
1067 if (bridge
->irq_set
== NULL
) {
1068 printk(KERN_ERR
"Configuring interrupts not supported\n");
1072 mutex_lock(&bridge
->irq_mtx
);
1074 bridge
->irq
[level
- 1].count
--;
1076 /* Disable IRQ level if no more interrupts attached at this level*/
1077 if (bridge
->irq
[level
- 1].count
== 0)
1078 bridge
->irq_set(bridge
, level
, 0, 1);
1080 bridge
->irq
[level
- 1].callback
[statid
].func
= NULL
;
1081 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= NULL
;
1083 mutex_unlock(&bridge
->irq_mtx
);
1085 EXPORT_SYMBOL(vme_irq_free
);
1087 int vme_irq_generate(struct vme_dev
*vdev
, int level
, int statid
)
1089 struct vme_bridge
*bridge
;
1091 bridge
= vdev
->bridge
;
1092 if (bridge
== NULL
) {
1093 printk(KERN_ERR
"Can't find VME bus\n");
1097 if ((level
< 1) || (level
> 7)) {
1098 printk(KERN_WARNING
"Invalid interrupt level\n");
1102 if (bridge
->irq_generate
== NULL
) {
1103 printk(KERN_WARNING
"Interrupt generation not supported\n");
1107 return bridge
->irq_generate(bridge
, level
, statid
);
1109 EXPORT_SYMBOL(vme_irq_generate
);
1112 * Request the location monitor, return resource or NULL
1114 struct vme_resource
*vme_lm_request(struct vme_dev
*vdev
)
1116 struct vme_bridge
*bridge
;
1117 struct list_head
*lm_pos
= NULL
;
1118 struct vme_lm_resource
*allocated_lm
= NULL
;
1119 struct vme_lm_resource
*lm
= NULL
;
1120 struct vme_resource
*resource
= NULL
;
1122 bridge
= vdev
->bridge
;
1123 if (bridge
== NULL
) {
1124 printk(KERN_ERR
"Can't find VME bus\n");
1128 /* Loop through DMA resources */
1129 list_for_each(lm_pos
, &bridge
->lm_resources
) {
1130 lm
= list_entry(lm_pos
,
1131 struct vme_lm_resource
, list
);
1134 printk(KERN_ERR
"Registered NULL Location Monitor resource\n");
1138 /* Find an unlocked controller */
1139 mutex_lock(&lm
->mtx
);
1140 if (lm
->locked
== 0) {
1142 mutex_unlock(&lm
->mtx
);
1146 mutex_unlock(&lm
->mtx
);
1149 /* Check to see if we found a resource */
1150 if (allocated_lm
== NULL
)
1153 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
1154 if (resource
== NULL
) {
1155 printk(KERN_ERR
"Unable to allocate resource structure\n");
1158 resource
->type
= VME_LM
;
1159 resource
->entry
= &allocated_lm
->list
;
1165 mutex_lock(&lm
->mtx
);
1167 mutex_unlock(&lm
->mtx
);
1172 EXPORT_SYMBOL(vme_lm_request
);
1174 int vme_lm_count(struct vme_resource
*resource
)
1176 struct vme_lm_resource
*lm
;
1178 if (resource
->type
!= VME_LM
) {
1179 printk(KERN_ERR
"Not a Location Monitor resource\n");
1183 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1185 return lm
->monitors
;
1187 EXPORT_SYMBOL(vme_lm_count
);
1189 int vme_lm_set(struct vme_resource
*resource
, unsigned long long lm_base
,
1190 u32 aspace
, u32 cycle
)
1192 struct vme_bridge
*bridge
= find_bridge(resource
);
1193 struct vme_lm_resource
*lm
;
1195 if (resource
->type
!= VME_LM
) {
1196 printk(KERN_ERR
"Not a Location Monitor resource\n");
1200 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1202 if (bridge
->lm_set
== NULL
) {
1203 printk(KERN_ERR
"vme_lm_set not supported\n");
1207 return bridge
->lm_set(lm
, lm_base
, aspace
, cycle
);
1209 EXPORT_SYMBOL(vme_lm_set
);
1211 int vme_lm_get(struct vme_resource
*resource
, unsigned long long *lm_base
,
1212 u32
*aspace
, u32
*cycle
)
1214 struct vme_bridge
*bridge
= find_bridge(resource
);
1215 struct vme_lm_resource
*lm
;
1217 if (resource
->type
!= VME_LM
) {
1218 printk(KERN_ERR
"Not a Location Monitor resource\n");
1222 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1224 if (bridge
->lm_get
== NULL
) {
1225 printk(KERN_ERR
"vme_lm_get not supported\n");
1229 return bridge
->lm_get(lm
, lm_base
, aspace
, cycle
);
1231 EXPORT_SYMBOL(vme_lm_get
);
1233 int vme_lm_attach(struct vme_resource
*resource
, int monitor
,
1234 void (*callback
)(int))
1236 struct vme_bridge
*bridge
= find_bridge(resource
);
1237 struct vme_lm_resource
*lm
;
1239 if (resource
->type
!= VME_LM
) {
1240 printk(KERN_ERR
"Not a Location Monitor resource\n");
1244 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1246 if (bridge
->lm_attach
== NULL
) {
1247 printk(KERN_ERR
"vme_lm_attach not supported\n");
1251 return bridge
->lm_attach(lm
, monitor
, callback
);
1253 EXPORT_SYMBOL(vme_lm_attach
);
1255 int vme_lm_detach(struct vme_resource
*resource
, int monitor
)
1257 struct vme_bridge
*bridge
= find_bridge(resource
);
1258 struct vme_lm_resource
*lm
;
1260 if (resource
->type
!= VME_LM
) {
1261 printk(KERN_ERR
"Not a Location Monitor resource\n");
1265 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1267 if (bridge
->lm_detach
== NULL
) {
1268 printk(KERN_ERR
"vme_lm_detach not supported\n");
1272 return bridge
->lm_detach(lm
, monitor
);
1274 EXPORT_SYMBOL(vme_lm_detach
);
1276 void vme_lm_free(struct vme_resource
*resource
)
1278 struct vme_lm_resource
*lm
;
1280 if (resource
->type
!= VME_LM
) {
1281 printk(KERN_ERR
"Not a Location Monitor resource\n");
1285 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1287 mutex_lock(&lm
->mtx
);
1290 * Check to see that there aren't any callbacks still attached, if
1291 * there are we should probably be detaching them!
1296 mutex_unlock(&lm
->mtx
);
1300 EXPORT_SYMBOL(vme_lm_free
);
1302 int vme_slot_num(struct vme_dev
*vdev
)
1304 struct vme_bridge
*bridge
;
1306 bridge
= vdev
->bridge
;
1307 if (bridge
== NULL
) {
1308 printk(KERN_ERR
"Can't find VME bus\n");
1312 if (bridge
->slot_get
== NULL
) {
1313 printk(KERN_WARNING
"vme_slot_num not supported\n");
1317 return bridge
->slot_get(bridge
);
1319 EXPORT_SYMBOL(vme_slot_num
);
1321 int vme_bus_num(struct vme_dev
*vdev
)
1323 struct vme_bridge
*bridge
;
1325 bridge
= vdev
->bridge
;
1326 if (bridge
== NULL
) {
1327 pr_err("Can't find VME bus\n");
1333 EXPORT_SYMBOL(vme_bus_num
);
1335 /* - Bridge Registration --------------------------------------------------- */
/* Device-model release hook: frees the vme_dev when its refcount drops. */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1342 int vme_register_bridge(struct vme_bridge
*bridge
)
1347 mutex_lock(&vme_buses_lock
);
1348 for (i
= 0; i
< sizeof(vme_bus_numbers
) * 8; i
++) {
1349 if ((vme_bus_numbers
& (1 << i
)) == 0) {
1350 vme_bus_numbers
|= (1 << i
);
1352 INIT_LIST_HEAD(&bridge
->devices
);
1353 list_add_tail(&bridge
->bus_list
, &vme_bus_list
);
1358 mutex_unlock(&vme_buses_lock
);
1362 EXPORT_SYMBOL(vme_register_bridge
);
1364 void vme_unregister_bridge(struct vme_bridge
*bridge
)
1366 struct vme_dev
*vdev
;
1367 struct vme_dev
*tmp
;
1369 mutex_lock(&vme_buses_lock
);
1370 vme_bus_numbers
&= ~(1 << bridge
->num
);
1371 list_for_each_entry_safe(vdev
, tmp
, &bridge
->devices
, bridge_list
) {
1372 list_del(&vdev
->drv_list
);
1373 list_del(&vdev
->bridge_list
);
1374 device_unregister(&vdev
->dev
);
1376 list_del(&bridge
->bus_list
);
1377 mutex_unlock(&vme_buses_lock
);
1379 EXPORT_SYMBOL(vme_unregister_bridge
);
1381 /* - Driver Registration --------------------------------------------------- */
1383 static int __vme_register_driver_bus(struct vme_driver
*drv
,
1384 struct vme_bridge
*bridge
, unsigned int ndevs
)
1388 struct vme_dev
*vdev
;
1389 struct vme_dev
*tmp
;
1391 for (i
= 0; i
< ndevs
; i
++) {
1392 vdev
= kzalloc(sizeof(struct vme_dev
), GFP_KERNEL
);
1398 vdev
->bridge
= bridge
;
1399 vdev
->dev
.platform_data
= drv
;
1400 vdev
->dev
.release
= vme_dev_release
;
1401 vdev
->dev
.parent
= bridge
->parent
;
1402 vdev
->dev
.bus
= &vme_bus_type
;
1403 dev_set_name(&vdev
->dev
, "%s.%u-%u", drv
->name
, bridge
->num
,
1406 err
= device_register(&vdev
->dev
);
1410 if (vdev
->dev
.platform_data
) {
1411 list_add_tail(&vdev
->drv_list
, &drv
->devices
);
1412 list_add_tail(&vdev
->bridge_list
, &bridge
->devices
);
1414 device_unregister(&vdev
->dev
);
1419 put_device(&vdev
->dev
);
1422 list_for_each_entry_safe(vdev
, tmp
, &drv
->devices
, drv_list
) {
1423 list_del(&vdev
->drv_list
);
1424 list_del(&vdev
->bridge_list
);
1425 device_unregister(&vdev
->dev
);
1430 static int __vme_register_driver(struct vme_driver
*drv
, unsigned int ndevs
)
1432 struct vme_bridge
*bridge
;
1435 mutex_lock(&vme_buses_lock
);
1436 list_for_each_entry(bridge
, &vme_bus_list
, bus_list
) {
1438 * This cannot cause trouble as we already have vme_buses_lock
1439 * and if the bridge is removed, it will have to go through
1440 * vme_unregister_bridge() to do it (which calls remove() on
1441 * the bridge which in turn tries to acquire vme_buses_lock and
1442 * will have to wait).
1444 err
= __vme_register_driver_bus(drv
, bridge
, ndevs
);
1448 mutex_unlock(&vme_buses_lock
);
1452 int vme_register_driver(struct vme_driver
*drv
, unsigned int ndevs
)
1456 drv
->driver
.name
= drv
->name
;
1457 drv
->driver
.bus
= &vme_bus_type
;
1458 INIT_LIST_HEAD(&drv
->devices
);
1460 err
= driver_register(&drv
->driver
);
1464 err
= __vme_register_driver(drv
, ndevs
);
1466 driver_unregister(&drv
->driver
);
1470 EXPORT_SYMBOL(vme_register_driver
);
1472 void vme_unregister_driver(struct vme_driver
*drv
)
1474 struct vme_dev
*dev
, *dev_tmp
;
1476 mutex_lock(&vme_buses_lock
);
1477 list_for_each_entry_safe(dev
, dev_tmp
, &drv
->devices
, drv_list
) {
1478 list_del(&dev
->drv_list
);
1479 list_del(&dev
->bridge_list
);
1480 device_unregister(&dev
->dev
);
1482 mutex_unlock(&vme_buses_lock
);
1484 driver_unregister(&drv
->driver
);
1486 EXPORT_SYMBOL(vme_unregister_driver
);
1488 /* - Bus Registration ------------------------------------------------------ */
1490 static int vme_bus_match(struct device
*dev
, struct device_driver
*drv
)
1492 struct vme_driver
*vme_drv
;
1494 vme_drv
= container_of(drv
, struct vme_driver
, driver
);
1496 if (dev
->platform_data
== vme_drv
) {
1497 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
1499 if (vme_drv
->match
&& vme_drv
->match(vdev
))
1502 dev
->platform_data
= NULL
;
1507 static int vme_bus_probe(struct device
*dev
)
1509 int retval
= -ENODEV
;
1510 struct vme_driver
*driver
;
1511 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
1513 driver
= dev
->platform_data
;
1515 if (driver
->probe
!= NULL
)
1516 retval
= driver
->probe(vdev
);
1521 static int vme_bus_remove(struct device
*dev
)
1523 int retval
= -ENODEV
;
1524 struct vme_driver
*driver
;
1525 struct vme_dev
*vdev
= dev_to_vme_dev(dev
);
1527 driver
= dev
->platform_data
;
1529 if (driver
->remove
!= NULL
)
1530 retval
= driver
->remove(vdev
);
1535 struct bus_type vme_bus_type
= {
1537 .match
= vme_bus_match
,
1538 .probe
= vme_bus_probe
,
1539 .remove
= vme_bus_remove
,
1541 EXPORT_SYMBOL(vme_bus_type
);
1543 static int __init
vme_init(void)
1545 return bus_register(&vme_bus_type
);
1548 static void __exit
vme_exit(void)
1550 bus_unregister(&vme_bus_type
);
1553 subsys_initcall(vme_init
);
1554 module_exit(vme_exit
);