4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
35 #include "vme_bridge.h"
/* Bitmask and mutex to keep track of bridge numbers */
static unsigned int vme_bus_numbers;
static DEFINE_MUTEX(vme_bus_num_mtx);
41 static void __exit
vme_exit(void);
42 static int __init
vme_init(void);
46 * Find the bridge resource associated with a specific device resource
48 static struct vme_bridge
*dev_to_bridge(struct device
*dev
)
50 return dev
->platform_data
;
54 * Find the bridge that the resource is associated with.
56 static struct vme_bridge
*find_bridge(struct vme_resource
*resource
)
58 /* Get list to search */
59 switch (resource
->type
) {
61 return list_entry(resource
->entry
, struct vme_master_resource
,
65 return list_entry(resource
->entry
, struct vme_slave_resource
,
69 return list_entry(resource
->entry
, struct vme_dma_resource
,
73 return list_entry(resource
->entry
, struct vme_lm_resource
,
77 printk(KERN_ERR
"Unknown resource type\n");
84 * Allocate a contiguous block of memory for use by the driver. This is used to
85 * create the buffers for the slave windows.
87 * XXX VME bridges could be available on buses other than PCI. At the momment
88 * this framework only supports PCI devices.
90 void *vme_alloc_consistent(struct vme_resource
*resource
, size_t size
,
93 struct vme_bridge
*bridge
;
96 if (resource
== NULL
) {
97 printk(KERN_ERR
"No resource\n");
101 bridge
= find_bridge(resource
);
102 if (bridge
== NULL
) {
103 printk(KERN_ERR
"Can't find bridge\n");
107 /* Find pci_dev container of dev */
108 if (bridge
->parent
== NULL
) {
109 printk(KERN_ERR
"Dev entry NULL\n");
112 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
114 return pci_alloc_consistent(pdev
, size
, dma
);
116 EXPORT_SYMBOL(vme_alloc_consistent
);
119 * Free previously allocated contiguous block of memory.
121 * XXX VME bridges could be available on buses other than PCI. At the momment
122 * this framework only supports PCI devices.
124 void vme_free_consistent(struct vme_resource
*resource
, size_t size
,
125 void *vaddr
, dma_addr_t dma
)
127 struct vme_bridge
*bridge
;
128 struct pci_dev
*pdev
;
130 if (resource
== NULL
) {
131 printk(KERN_ERR
"No resource\n");
135 bridge
= find_bridge(resource
);
136 if (bridge
== NULL
) {
137 printk(KERN_ERR
"Can't find bridge\n");
141 /* Find pci_dev container of dev */
142 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
144 pci_free_consistent(pdev
, size
, vaddr
, dma
);
146 EXPORT_SYMBOL(vme_free_consistent
);
148 size_t vme_get_size(struct vme_resource
*resource
)
151 unsigned long long base
, size
;
153 vme_address_t aspace
;
157 switch (resource
->type
) {
159 retval
= vme_master_get(resource
, &enabled
, &base
, &size
,
160 &aspace
, &cycle
, &dwidth
);
165 retval
= vme_slave_get(resource
, &enabled
, &base
, &size
,
166 &buf_base
, &aspace
, &cycle
);
174 printk(KERN_ERR
"Unknown resource type\n");
179 EXPORT_SYMBOL(vme_get_size
);
181 static int vme_check_window(vme_address_t aspace
, unsigned long long vme_base
,
182 unsigned long long size
)
188 if (((vme_base
+ size
) > VME_A16_MAX
) ||
189 (vme_base
> VME_A16_MAX
))
193 if (((vme_base
+ size
) > VME_A24_MAX
) ||
194 (vme_base
> VME_A24_MAX
))
198 if (((vme_base
+ size
) > VME_A32_MAX
) ||
199 (vme_base
> VME_A32_MAX
))
204 * Any value held in an unsigned long long can be used as the
209 if (((vme_base
+ size
) > VME_CRCSR_MAX
) ||
210 (vme_base
> VME_CRCSR_MAX
))
220 printk(KERN_ERR
"Invalid address space\n");
229 * Request a slave image with specific attributes, return some unique
232 struct vme_resource
*vme_slave_request(struct device
*dev
,
233 vme_address_t address
, vme_cycle_t cycle
)
235 struct vme_bridge
*bridge
;
236 struct list_head
*slave_pos
= NULL
;
237 struct vme_slave_resource
*allocated_image
= NULL
;
238 struct vme_slave_resource
*slave_image
= NULL
;
239 struct vme_resource
*resource
= NULL
;
241 bridge
= dev_to_bridge(dev
);
242 if (bridge
== NULL
) {
243 printk(KERN_ERR
"Can't find VME bus\n");
247 /* Loop through slave resources */
248 list_for_each(slave_pos
, &(bridge
->slave_resources
)) {
249 slave_image
= list_entry(slave_pos
,
250 struct vme_slave_resource
, list
);
252 if (slave_image
== NULL
) {
253 printk(KERN_ERR
"Registered NULL Slave resource\n");
257 /* Find an unlocked and compatible image */
258 mutex_lock(&(slave_image
->mtx
));
259 if (((slave_image
->address_attr
& address
) == address
) &&
260 ((slave_image
->cycle_attr
& cycle
) == cycle
) &&
261 (slave_image
->locked
== 0)) {
263 slave_image
->locked
= 1;
264 mutex_unlock(&(slave_image
->mtx
));
265 allocated_image
= slave_image
;
268 mutex_unlock(&(slave_image
->mtx
));
272 if (allocated_image
== NULL
)
275 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
276 if (resource
== NULL
) {
277 printk(KERN_WARNING
"Unable to allocate resource structure\n");
280 resource
->type
= VME_SLAVE
;
281 resource
->entry
= &(allocated_image
->list
);
287 mutex_lock(&(slave_image
->mtx
));
288 slave_image
->locked
= 0;
289 mutex_unlock(&(slave_image
->mtx
));
294 EXPORT_SYMBOL(vme_slave_request
);
296 int vme_slave_set(struct vme_resource
*resource
, int enabled
,
297 unsigned long long vme_base
, unsigned long long size
,
298 dma_addr_t buf_base
, vme_address_t aspace
, vme_cycle_t cycle
)
300 struct vme_bridge
*bridge
= find_bridge(resource
);
301 struct vme_slave_resource
*image
;
304 if (resource
->type
!= VME_SLAVE
) {
305 printk(KERN_ERR
"Not a slave resource\n");
309 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
311 if (bridge
->slave_set
== NULL
) {
312 printk(KERN_ERR
"Function not supported\n");
316 if (!(((image
->address_attr
& aspace
) == aspace
) &&
317 ((image
->cycle_attr
& cycle
) == cycle
))) {
318 printk(KERN_ERR
"Invalid attributes\n");
322 retval
= vme_check_window(aspace
, vme_base
, size
);
326 return bridge
->slave_set(image
, enabled
, vme_base
, size
, buf_base
,
329 EXPORT_SYMBOL(vme_slave_set
);
331 int vme_slave_get(struct vme_resource
*resource
, int *enabled
,
332 unsigned long long *vme_base
, unsigned long long *size
,
333 dma_addr_t
*buf_base
, vme_address_t
*aspace
, vme_cycle_t
*cycle
)
335 struct vme_bridge
*bridge
= find_bridge(resource
);
336 struct vme_slave_resource
*image
;
338 if (resource
->type
!= VME_SLAVE
) {
339 printk(KERN_ERR
"Not a slave resource\n");
343 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
345 if (bridge
->slave_get
== NULL
) {
346 printk(KERN_ERR
"vme_slave_get not supported\n");
350 return bridge
->slave_get(image
, enabled
, vme_base
, size
, buf_base
,
353 EXPORT_SYMBOL(vme_slave_get
);
355 void vme_slave_free(struct vme_resource
*resource
)
357 struct vme_slave_resource
*slave_image
;
359 if (resource
->type
!= VME_SLAVE
) {
360 printk(KERN_ERR
"Not a slave resource\n");
364 slave_image
= list_entry(resource
->entry
, struct vme_slave_resource
,
366 if (slave_image
== NULL
) {
367 printk(KERN_ERR
"Can't find slave resource\n");
372 mutex_lock(&(slave_image
->mtx
));
373 if (slave_image
->locked
== 0)
374 printk(KERN_ERR
"Image is already free\n");
376 slave_image
->locked
= 0;
377 mutex_unlock(&(slave_image
->mtx
));
379 /* Free up resource memory */
382 EXPORT_SYMBOL(vme_slave_free
);
385 * Request a master image with specific attributes, return some unique
388 struct vme_resource
*vme_master_request(struct device
*dev
,
389 vme_address_t address
, vme_cycle_t cycle
, vme_width_t dwidth
)
391 struct vme_bridge
*bridge
;
392 struct list_head
*master_pos
= NULL
;
393 struct vme_master_resource
*allocated_image
= NULL
;
394 struct vme_master_resource
*master_image
= NULL
;
395 struct vme_resource
*resource
= NULL
;
397 bridge
= dev_to_bridge(dev
);
398 if (bridge
== NULL
) {
399 printk(KERN_ERR
"Can't find VME bus\n");
403 /* Loop through master resources */
404 list_for_each(master_pos
, &(bridge
->master_resources
)) {
405 master_image
= list_entry(master_pos
,
406 struct vme_master_resource
, list
);
408 if (master_image
== NULL
) {
409 printk(KERN_WARNING
"Registered NULL master resource\n");
413 /* Find an unlocked and compatible image */
414 spin_lock(&(master_image
->lock
));
415 if (((master_image
->address_attr
& address
) == address
) &&
416 ((master_image
->cycle_attr
& cycle
) == cycle
) &&
417 ((master_image
->width_attr
& dwidth
) == dwidth
) &&
418 (master_image
->locked
== 0)) {
420 master_image
->locked
= 1;
421 spin_unlock(&(master_image
->lock
));
422 allocated_image
= master_image
;
425 spin_unlock(&(master_image
->lock
));
428 /* Check to see if we found a resource */
429 if (allocated_image
== NULL
) {
430 printk(KERN_ERR
"Can't find a suitable resource\n");
434 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
435 if (resource
== NULL
) {
436 printk(KERN_ERR
"Unable to allocate resource structure\n");
439 resource
->type
= VME_MASTER
;
440 resource
->entry
= &(allocated_image
->list
);
447 spin_lock(&(master_image
->lock
));
448 master_image
->locked
= 0;
449 spin_unlock(&(master_image
->lock
));
454 EXPORT_SYMBOL(vme_master_request
);
456 int vme_master_set(struct vme_resource
*resource
, int enabled
,
457 unsigned long long vme_base
, unsigned long long size
,
458 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
460 struct vme_bridge
*bridge
= find_bridge(resource
);
461 struct vme_master_resource
*image
;
464 if (resource
->type
!= VME_MASTER
) {
465 printk(KERN_ERR
"Not a master resource\n");
469 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
471 if (bridge
->master_set
== NULL
) {
472 printk(KERN_WARNING
"vme_master_set not supported\n");
476 if (!(((image
->address_attr
& aspace
) == aspace
) &&
477 ((image
->cycle_attr
& cycle
) == cycle
) &&
478 ((image
->width_attr
& dwidth
) == dwidth
))) {
479 printk(KERN_WARNING
"Invalid attributes\n");
483 retval
= vme_check_window(aspace
, vme_base
, size
);
487 return bridge
->master_set(image
, enabled
, vme_base
, size
, aspace
,
490 EXPORT_SYMBOL(vme_master_set
);
492 int vme_master_get(struct vme_resource
*resource
, int *enabled
,
493 unsigned long long *vme_base
, unsigned long long *size
,
494 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
496 struct vme_bridge
*bridge
= find_bridge(resource
);
497 struct vme_master_resource
*image
;
499 if (resource
->type
!= VME_MASTER
) {
500 printk(KERN_ERR
"Not a master resource\n");
504 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
506 if (bridge
->master_get
== NULL
) {
507 printk(KERN_WARNING
"vme_master_set not supported\n");
511 return bridge
->master_get(image
, enabled
, vme_base
, size
, aspace
,
514 EXPORT_SYMBOL(vme_master_get
);
517 * Read data out of VME space into a buffer.
519 ssize_t
vme_master_read(struct vme_resource
*resource
, void *buf
, size_t count
,
522 struct vme_bridge
*bridge
= find_bridge(resource
);
523 struct vme_master_resource
*image
;
526 if (bridge
->master_read
== NULL
) {
527 printk(KERN_WARNING
"Reading from resource not supported\n");
531 if (resource
->type
!= VME_MASTER
) {
532 printk(KERN_ERR
"Not a master resource\n");
536 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
538 length
= vme_get_size(resource
);
540 if (offset
> length
) {
541 printk(KERN_WARNING
"Invalid Offset\n");
545 if ((offset
+ count
) > length
)
546 count
= length
- offset
;
548 return bridge
->master_read(image
, buf
, count
, offset
);
551 EXPORT_SYMBOL(vme_master_read
);
554 * Write data out to VME space from a buffer.
556 ssize_t
vme_master_write(struct vme_resource
*resource
, void *buf
,
557 size_t count
, loff_t offset
)
559 struct vme_bridge
*bridge
= find_bridge(resource
);
560 struct vme_master_resource
*image
;
563 if (bridge
->master_write
== NULL
) {
564 printk(KERN_WARNING
"Writing to resource not supported\n");
568 if (resource
->type
!= VME_MASTER
) {
569 printk(KERN_ERR
"Not a master resource\n");
573 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
575 length
= vme_get_size(resource
);
577 if (offset
> length
) {
578 printk(KERN_WARNING
"Invalid Offset\n");
582 if ((offset
+ count
) > length
)
583 count
= length
- offset
;
585 return bridge
->master_write(image
, buf
, count
, offset
);
587 EXPORT_SYMBOL(vme_master_write
);
590 * Perform RMW cycle to provided location.
592 unsigned int vme_master_rmw(struct vme_resource
*resource
, unsigned int mask
,
593 unsigned int compare
, unsigned int swap
, loff_t offset
)
595 struct vme_bridge
*bridge
= find_bridge(resource
);
596 struct vme_master_resource
*image
;
598 if (bridge
->master_rmw
== NULL
) {
599 printk(KERN_WARNING
"Writing to resource not supported\n");
603 if (resource
->type
!= VME_MASTER
) {
604 printk(KERN_ERR
"Not a master resource\n");
608 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
610 return bridge
->master_rmw(image
, mask
, compare
, swap
, offset
);
612 EXPORT_SYMBOL(vme_master_rmw
);
614 void vme_master_free(struct vme_resource
*resource
)
616 struct vme_master_resource
*master_image
;
618 if (resource
->type
!= VME_MASTER
) {
619 printk(KERN_ERR
"Not a master resource\n");
623 master_image
= list_entry(resource
->entry
, struct vme_master_resource
,
625 if (master_image
== NULL
) {
626 printk(KERN_ERR
"Can't find master resource\n");
631 spin_lock(&(master_image
->lock
));
632 if (master_image
->locked
== 0)
633 printk(KERN_ERR
"Image is already free\n");
635 master_image
->locked
= 0;
636 spin_unlock(&(master_image
->lock
));
638 /* Free up resource memory */
641 EXPORT_SYMBOL(vme_master_free
);
644 * Request a DMA controller with specific attributes, return some unique
647 struct vme_resource
*vme_dma_request(struct device
*dev
, vme_dma_route_t route
)
649 struct vme_bridge
*bridge
;
650 struct list_head
*dma_pos
= NULL
;
651 struct vme_dma_resource
*allocated_ctrlr
= NULL
;
652 struct vme_dma_resource
*dma_ctrlr
= NULL
;
653 struct vme_resource
*resource
= NULL
;
655 /* XXX Not checking resource attributes */
656 printk(KERN_ERR
"No VME resource Attribute tests done\n");
658 bridge
= dev_to_bridge(dev
);
659 if (bridge
== NULL
) {
660 printk(KERN_ERR
"Can't find VME bus\n");
664 /* Loop through DMA resources */
665 list_for_each(dma_pos
, &(bridge
->dma_resources
)) {
666 dma_ctrlr
= list_entry(dma_pos
,
667 struct vme_dma_resource
, list
);
669 if (dma_ctrlr
== NULL
) {
670 printk(KERN_ERR
"Registered NULL DMA resource\n");
674 /* Find an unlocked and compatible controller */
675 mutex_lock(&(dma_ctrlr
->mtx
));
676 if (((dma_ctrlr
->route_attr
& route
) == route
) &&
677 (dma_ctrlr
->locked
== 0)) {
679 dma_ctrlr
->locked
= 1;
680 mutex_unlock(&(dma_ctrlr
->mtx
));
681 allocated_ctrlr
= dma_ctrlr
;
684 mutex_unlock(&(dma_ctrlr
->mtx
));
687 /* Check to see if we found a resource */
688 if (allocated_ctrlr
== NULL
)
691 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
692 if (resource
== NULL
) {
693 printk(KERN_WARNING
"Unable to allocate resource structure\n");
696 resource
->type
= VME_DMA
;
697 resource
->entry
= &(allocated_ctrlr
->list
);
703 mutex_lock(&(dma_ctrlr
->mtx
));
704 dma_ctrlr
->locked
= 0;
705 mutex_unlock(&(dma_ctrlr
->mtx
));
710 EXPORT_SYMBOL(vme_dma_request
);
715 struct vme_dma_list
*vme_new_dma_list(struct vme_resource
*resource
)
717 struct vme_dma_resource
*ctrlr
;
718 struct vme_dma_list
*dma_list
;
720 if (resource
->type
!= VME_DMA
) {
721 printk(KERN_ERR
"Not a DMA resource\n");
725 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
727 dma_list
= kmalloc(sizeof(struct vme_dma_list
), GFP_KERNEL
);
728 if (dma_list
== NULL
) {
729 printk(KERN_ERR
"Unable to allocate memory for new dma list\n");
732 INIT_LIST_HEAD(&(dma_list
->entries
));
733 dma_list
->parent
= ctrlr
;
734 mutex_init(&(dma_list
->mtx
));
738 EXPORT_SYMBOL(vme_new_dma_list
);
741 * Create "Pattern" type attributes
743 struct vme_dma_attr
*vme_dma_pattern_attribute(u32 pattern
,
746 struct vme_dma_attr
*attributes
;
747 struct vme_dma_pattern
*pattern_attr
;
749 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
750 if (attributes
== NULL
) {
751 printk(KERN_ERR
"Unable to allocate memory for attributes "
756 pattern_attr
= kmalloc(sizeof(struct vme_dma_pattern
), GFP_KERNEL
);
757 if (pattern_attr
== NULL
) {
758 printk(KERN_ERR
"Unable to allocate memory for pattern "
763 attributes
->type
= VME_DMA_PATTERN
;
764 attributes
->private = (void *)pattern_attr
;
766 pattern_attr
->pattern
= pattern
;
767 pattern_attr
->type
= type
;
777 EXPORT_SYMBOL(vme_dma_pattern_attribute
);
780 * Create "PCI" type attributes
782 struct vme_dma_attr
*vme_dma_pci_attribute(dma_addr_t address
)
784 struct vme_dma_attr
*attributes
;
785 struct vme_dma_pci
*pci_attr
;
787 /* XXX Run some sanity checks here */
789 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
790 if (attributes
== NULL
) {
791 printk(KERN_ERR
"Unable to allocate memory for attributes "
796 pci_attr
= kmalloc(sizeof(struct vme_dma_pci
), GFP_KERNEL
);
797 if (pci_attr
== NULL
) {
798 printk(KERN_ERR
"Unable to allocate memory for pci "
805 attributes
->type
= VME_DMA_PCI
;
806 attributes
->private = (void *)pci_attr
;
808 pci_attr
->address
= address
;
818 EXPORT_SYMBOL(vme_dma_pci_attribute
);
821 * Create "VME" type attributes
823 struct vme_dma_attr
*vme_dma_vme_attribute(unsigned long long address
,
824 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
826 struct vme_dma_attr
*attributes
;
827 struct vme_dma_vme
*vme_attr
;
829 attributes
= kmalloc(
830 sizeof(struct vme_dma_attr
), GFP_KERNEL
);
831 if (attributes
== NULL
) {
832 printk(KERN_ERR
"Unable to allocate memory for attributes "
837 vme_attr
= kmalloc(sizeof(struct vme_dma_vme
), GFP_KERNEL
);
838 if (vme_attr
== NULL
) {
839 printk(KERN_ERR
"Unable to allocate memory for vme "
844 attributes
->type
= VME_DMA_VME
;
845 attributes
->private = (void *)vme_attr
;
847 vme_attr
->address
= address
;
848 vme_attr
->aspace
= aspace
;
849 vme_attr
->cycle
= cycle
;
850 vme_attr
->dwidth
= dwidth
;
860 EXPORT_SYMBOL(vme_dma_vme_attribute
);
865 void vme_dma_free_attribute(struct vme_dma_attr
*attributes
)
867 kfree(attributes
->private);
870 EXPORT_SYMBOL(vme_dma_free_attribute
);
872 int vme_dma_list_add(struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
873 struct vme_dma_attr
*dest
, size_t count
)
875 struct vme_bridge
*bridge
= list
->parent
->parent
;
878 if (bridge
->dma_list_add
== NULL
) {
879 printk(KERN_WARNING
"Link List DMA generation not supported\n");
883 if (!mutex_trylock(&(list
->mtx
))) {
884 printk(KERN_ERR
"Link List already submitted\n");
888 retval
= bridge
->dma_list_add(list
, src
, dest
, count
);
890 mutex_unlock(&(list
->mtx
));
894 EXPORT_SYMBOL(vme_dma_list_add
);
896 int vme_dma_list_exec(struct vme_dma_list
*list
)
898 struct vme_bridge
*bridge
= list
->parent
->parent
;
901 if (bridge
->dma_list_exec
== NULL
) {
902 printk(KERN_ERR
"Link List DMA execution not supported\n");
906 mutex_lock(&(list
->mtx
));
908 retval
= bridge
->dma_list_exec(list
);
910 mutex_unlock(&(list
->mtx
));
914 EXPORT_SYMBOL(vme_dma_list_exec
);
916 int vme_dma_list_free(struct vme_dma_list
*list
)
918 struct vme_bridge
*bridge
= list
->parent
->parent
;
921 if (bridge
->dma_list_empty
== NULL
) {
922 printk(KERN_WARNING
"Emptying of Link Lists not supported\n");
926 if (!mutex_trylock(&(list
->mtx
))) {
927 printk(KERN_ERR
"Link List in use\n");
932 * Empty out all of the entries from the dma list. We need to go to the
933 * low level driver as dma entries are driver specific.
935 retval
= bridge
->dma_list_empty(list
);
937 printk(KERN_ERR
"Unable to empty link-list entries\n");
938 mutex_unlock(&(list
->mtx
));
941 mutex_unlock(&(list
->mtx
));
946 EXPORT_SYMBOL(vme_dma_list_free
);
948 int vme_dma_free(struct vme_resource
*resource
)
950 struct vme_dma_resource
*ctrlr
;
952 if (resource
->type
!= VME_DMA
) {
953 printk(KERN_ERR
"Not a DMA resource\n");
957 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
959 if (!mutex_trylock(&(ctrlr
->mtx
))) {
960 printk(KERN_ERR
"Resource busy, can't free\n");
964 if (!(list_empty(&(ctrlr
->pending
)) && list_empty(&(ctrlr
->running
)))) {
965 printk(KERN_WARNING
"Resource still processing transfers\n");
966 mutex_unlock(&(ctrlr
->mtx
));
972 mutex_unlock(&(ctrlr
->mtx
));
976 EXPORT_SYMBOL(vme_dma_free
);
978 void vme_irq_handler(struct vme_bridge
*bridge
, int level
, int statid
)
980 void (*call
)(int, int, void *);
983 call
= bridge
->irq
[level
- 1].callback
[statid
].func
;
984 priv_data
= bridge
->irq
[level
- 1].callback
[statid
].priv_data
;
987 call(level
, statid
, priv_data
);
989 printk(KERN_WARNING
"Spurilous VME interrupt, level:%x, "
990 "vector:%x\n", level
, statid
);
992 EXPORT_SYMBOL(vme_irq_handler
);
994 int vme_irq_request(struct device
*dev
, int level
, int statid
,
995 void (*callback
)(int, int, void *),
998 struct vme_bridge
*bridge
;
1000 bridge
= dev_to_bridge(dev
);
1001 if (bridge
== NULL
) {
1002 printk(KERN_ERR
"Can't find VME bus\n");
1006 if ((level
< 1) || (level
> 7)) {
1007 printk(KERN_ERR
"Invalid interrupt level\n");
1011 if (bridge
->irq_set
== NULL
) {
1012 printk(KERN_ERR
"Configuring interrupts not supported\n");
1016 mutex_lock(&(bridge
->irq_mtx
));
1018 if (bridge
->irq
[level
- 1].callback
[statid
].func
) {
1019 mutex_unlock(&(bridge
->irq_mtx
));
1020 printk(KERN_WARNING
"VME Interrupt already taken\n");
1024 bridge
->irq
[level
- 1].count
++;
1025 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= priv_data
;
1026 bridge
->irq
[level
- 1].callback
[statid
].func
= callback
;
1028 /* Enable IRQ level */
1029 bridge
->irq_set(bridge
, level
, 1, 1);
1031 mutex_unlock(&(bridge
->irq_mtx
));
1035 EXPORT_SYMBOL(vme_irq_request
);
1037 void vme_irq_free(struct device
*dev
, int level
, int statid
)
1039 struct vme_bridge
*bridge
;
1041 bridge
= dev_to_bridge(dev
);
1042 if (bridge
== NULL
) {
1043 printk(KERN_ERR
"Can't find VME bus\n");
1047 if ((level
< 1) || (level
> 7)) {
1048 printk(KERN_ERR
"Invalid interrupt level\n");
1052 if (bridge
->irq_set
== NULL
) {
1053 printk(KERN_ERR
"Configuring interrupts not supported\n");
1057 mutex_lock(&(bridge
->irq_mtx
));
1059 bridge
->irq
[level
- 1].count
--;
1061 /* Disable IRQ level if no more interrupts attached at this level*/
1062 if (bridge
->irq
[level
- 1].count
== 0)
1063 bridge
->irq_set(bridge
, level
, 0, 1);
1065 bridge
->irq
[level
- 1].callback
[statid
].func
= NULL
;
1066 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= NULL
;
1068 mutex_unlock(&(bridge
->irq_mtx
));
1070 EXPORT_SYMBOL(vme_irq_free
);
1072 int vme_irq_generate(struct device
*dev
, int level
, int statid
)
1074 struct vme_bridge
*bridge
;
1076 bridge
= dev_to_bridge(dev
);
1077 if (bridge
== NULL
) {
1078 printk(KERN_ERR
"Can't find VME bus\n");
1082 if ((level
< 1) || (level
> 7)) {
1083 printk(KERN_WARNING
"Invalid interrupt level\n");
1087 if (bridge
->irq_generate
== NULL
) {
1088 printk(KERN_WARNING
"Interrupt generation not supported\n");
1092 return bridge
->irq_generate(bridge
, level
, statid
);
1094 EXPORT_SYMBOL(vme_irq_generate
);
1097 * Request the location monitor, return resource or NULL
1099 struct vme_resource
*vme_lm_request(struct device
*dev
)
1101 struct vme_bridge
*bridge
;
1102 struct list_head
*lm_pos
= NULL
;
1103 struct vme_lm_resource
*allocated_lm
= NULL
;
1104 struct vme_lm_resource
*lm
= NULL
;
1105 struct vme_resource
*resource
= NULL
;
1107 bridge
= dev_to_bridge(dev
);
1108 if (bridge
== NULL
) {
1109 printk(KERN_ERR
"Can't find VME bus\n");
1113 /* Loop through DMA resources */
1114 list_for_each(lm_pos
, &(bridge
->lm_resources
)) {
1115 lm
= list_entry(lm_pos
,
1116 struct vme_lm_resource
, list
);
1119 printk(KERN_ERR
"Registered NULL Location Monitor "
1124 /* Find an unlocked controller */
1125 mutex_lock(&(lm
->mtx
));
1126 if (lm
->locked
== 0) {
1128 mutex_unlock(&(lm
->mtx
));
1132 mutex_unlock(&(lm
->mtx
));
1135 /* Check to see if we found a resource */
1136 if (allocated_lm
== NULL
)
1139 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
1140 if (resource
== NULL
) {
1141 printk(KERN_ERR
"Unable to allocate resource structure\n");
1144 resource
->type
= VME_LM
;
1145 resource
->entry
= &(allocated_lm
->list
);
1151 mutex_lock(&(lm
->mtx
));
1153 mutex_unlock(&(lm
->mtx
));
1158 EXPORT_SYMBOL(vme_lm_request
);
1160 int vme_lm_count(struct vme_resource
*resource
)
1162 struct vme_lm_resource
*lm
;
1164 if (resource
->type
!= VME_LM
) {
1165 printk(KERN_ERR
"Not a Location Monitor resource\n");
1169 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1171 return lm
->monitors
;
1173 EXPORT_SYMBOL(vme_lm_count
);
1175 int vme_lm_set(struct vme_resource
*resource
, unsigned long long lm_base
,
1176 vme_address_t aspace
, vme_cycle_t cycle
)
1178 struct vme_bridge
*bridge
= find_bridge(resource
);
1179 struct vme_lm_resource
*lm
;
1181 if (resource
->type
!= VME_LM
) {
1182 printk(KERN_ERR
"Not a Location Monitor resource\n");
1186 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1188 if (bridge
->lm_set
== NULL
) {
1189 printk(KERN_ERR
"vme_lm_set not supported\n");
1193 return bridge
->lm_set(lm
, lm_base
, aspace
, cycle
);
1195 EXPORT_SYMBOL(vme_lm_set
);
1197 int vme_lm_get(struct vme_resource
*resource
, unsigned long long *lm_base
,
1198 vme_address_t
*aspace
, vme_cycle_t
*cycle
)
1200 struct vme_bridge
*bridge
= find_bridge(resource
);
1201 struct vme_lm_resource
*lm
;
1203 if (resource
->type
!= VME_LM
) {
1204 printk(KERN_ERR
"Not a Location Monitor resource\n");
1208 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1210 if (bridge
->lm_get
== NULL
) {
1211 printk(KERN_ERR
"vme_lm_get not supported\n");
1215 return bridge
->lm_get(lm
, lm_base
, aspace
, cycle
);
1217 EXPORT_SYMBOL(vme_lm_get
);
1219 int vme_lm_attach(struct vme_resource
*resource
, int monitor
,
1220 void (*callback
)(int))
1222 struct vme_bridge
*bridge
= find_bridge(resource
);
1223 struct vme_lm_resource
*lm
;
1225 if (resource
->type
!= VME_LM
) {
1226 printk(KERN_ERR
"Not a Location Monitor resource\n");
1230 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1232 if (bridge
->lm_attach
== NULL
) {
1233 printk(KERN_ERR
"vme_lm_attach not supported\n");
1237 return bridge
->lm_attach(lm
, monitor
, callback
);
1239 EXPORT_SYMBOL(vme_lm_attach
);
1241 int vme_lm_detach(struct vme_resource
*resource
, int monitor
)
1243 struct vme_bridge
*bridge
= find_bridge(resource
);
1244 struct vme_lm_resource
*lm
;
1246 if (resource
->type
!= VME_LM
) {
1247 printk(KERN_ERR
"Not a Location Monitor resource\n");
1251 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1253 if (bridge
->lm_detach
== NULL
) {
1254 printk(KERN_ERR
"vme_lm_detach not supported\n");
1258 return bridge
->lm_detach(lm
, monitor
);
1260 EXPORT_SYMBOL(vme_lm_detach
);
1262 void vme_lm_free(struct vme_resource
*resource
)
1264 struct vme_lm_resource
*lm
;
1266 if (resource
->type
!= VME_LM
) {
1267 printk(KERN_ERR
"Not a Location Monitor resource\n");
1271 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1273 mutex_lock(&(lm
->mtx
));
1276 * Check to see that there aren't any callbacks still attached, if
1277 * there are we should probably be detaching them!
1282 mutex_unlock(&(lm
->mtx
));
1286 EXPORT_SYMBOL(vme_lm_free
);
1288 int vme_slot_get(struct device
*bus
)
1290 struct vme_bridge
*bridge
;
1292 bridge
= dev_to_bridge(bus
);
1293 if (bridge
== NULL
) {
1294 printk(KERN_ERR
"Can't find VME bus\n");
1298 if (bridge
->slot_get
== NULL
) {
1299 printk(KERN_WARNING
"vme_slot_get not supported\n");
1303 return bridge
->slot_get(bridge
);
1305 EXPORT_SYMBOL(vme_slot_get
);
1308 /* - Bridge Registration --------------------------------------------------- */
1310 static int vme_alloc_bus_num(void)
1314 mutex_lock(&vme_bus_num_mtx
);
1315 for (i
= 0; i
< sizeof(vme_bus_numbers
) * 8; i
++) {
1316 if (((vme_bus_numbers
>> i
) & 0x1) == 0) {
1317 vme_bus_numbers
|= (0x1 << i
);
1321 mutex_unlock(&vme_bus_num_mtx
);
1326 static void vme_free_bus_num(int bus
)
1328 mutex_lock(&vme_bus_num_mtx
);
1329 vme_bus_numbers
|= ~(0x1 << bus
);
1330 mutex_unlock(&vme_bus_num_mtx
);
1333 int vme_register_bridge(struct vme_bridge
*bridge
)
1339 bridge
->num
= vme_alloc_bus_num();
1341 /* This creates 32 vme "slot" devices. This equates to a slot for each
1342 * ID available in a system conforming to the ANSI/VITA 1-1994
1345 for (i
= 0; i
< VME_SLOTS_MAX
; i
++) {
1346 dev
= &(bridge
->dev
[i
]);
1347 memset(dev
, 0, sizeof(struct device
));
1349 dev
->parent
= bridge
->parent
;
1350 dev
->bus
= &(vme_bus_type
);
1352 * We save a pointer to the bridge in platform_data so that we
1353 * can get to it later. We keep driver_data for use by the
1354 * driver that binds against the slot
1356 dev
->platform_data
= bridge
;
1357 dev_set_name(dev
, "vme-%x.%x", bridge
->num
, i
+ 1);
1359 retval
= device_register(dev
);
1369 dev
= &(bridge
->dev
[i
]);
1370 device_unregister(dev
);
1372 vme_free_bus_num(bridge
->num
);
1375 EXPORT_SYMBOL(vme_register_bridge
);
1377 void vme_unregister_bridge(struct vme_bridge
*bridge
)
1383 for (i
= 0; i
< VME_SLOTS_MAX
; i
++) {
1384 dev
= &(bridge
->dev
[i
]);
1385 device_unregister(dev
);
1387 vme_free_bus_num(bridge
->num
);
1389 EXPORT_SYMBOL(vme_unregister_bridge
);
1392 /* - Driver Registration --------------------------------------------------- */
1394 int vme_register_driver(struct vme_driver
*drv
)
1396 drv
->driver
.name
= drv
->name
;
1397 drv
->driver
.bus
= &vme_bus_type
;
1399 return driver_register(&drv
->driver
);
1401 EXPORT_SYMBOL(vme_register_driver
);
1403 void vme_unregister_driver(struct vme_driver
*drv
)
1405 driver_unregister(&drv
->driver
);
1407 EXPORT_SYMBOL(vme_unregister_driver
);
1409 /* - Bus Registration ------------------------------------------------------ */
1411 static int vme_calc_slot(struct device
*dev
)
1413 struct vme_bridge
*bridge
;
1416 bridge
= dev_to_bridge(dev
);
1418 /* Determine slot number */
1420 while (num
< VME_SLOTS_MAX
) {
1421 if (&(bridge
->dev
[num
]) == dev
)
1426 if (num
== VME_SLOTS_MAX
) {
1427 dev_err(dev
, "Failed to identify slot\n");
1437 static struct vme_driver
*dev_to_vme_driver(struct device
*dev
)
1439 if (dev
->driver
== NULL
)
1440 printk(KERN_ERR
"Bugger dev->driver is NULL\n");
1442 return container_of(dev
->driver
, struct vme_driver
, driver
);
1445 static int vme_bus_match(struct device
*dev
, struct device_driver
*drv
)
1447 struct vme_bridge
*bridge
;
1448 struct vme_driver
*driver
;
1451 bridge
= dev_to_bridge(dev
);
1452 driver
= container_of(drv
, struct vme_driver
, driver
);
1454 num
= vme_calc_slot(dev
);
1458 if (driver
->bind_table
== NULL
) {
1459 dev_err(dev
, "Bind table NULL\n");
1464 while ((driver
->bind_table
[i
].bus
!= 0) ||
1465 (driver
->bind_table
[i
].slot
!= 0)) {
1467 if (bridge
->num
== driver
->bind_table
[i
].bus
) {
1468 if (num
== driver
->bind_table
[i
].slot
)
1471 if (driver
->bind_table
[i
].slot
== VME_SLOT_ALL
)
1474 if ((driver
->bind_table
[i
].slot
== VME_SLOT_CURRENT
) &&
1475 (num
== vme_slot_get(dev
)))
1486 static int vme_bus_probe(struct device
*dev
)
1488 struct vme_bridge
*bridge
;
1489 struct vme_driver
*driver
;
1490 int retval
= -ENODEV
;
1492 driver
= dev_to_vme_driver(dev
);
1493 bridge
= dev_to_bridge(dev
);
1495 if (driver
->probe
!= NULL
)
1496 retval
= driver
->probe(dev
, bridge
->num
, vme_calc_slot(dev
));
1501 static int vme_bus_remove(struct device
*dev
)
1503 struct vme_bridge
*bridge
;
1504 struct vme_driver
*driver
;
1505 int retval
= -ENODEV
;
1507 driver
= dev_to_vme_driver(dev
);
1508 bridge
= dev_to_bridge(dev
);
1510 if (driver
->remove
!= NULL
)
1511 retval
= driver
->remove(dev
, bridge
->num
, vme_calc_slot(dev
));
1516 struct bus_type vme_bus_type
= {
1518 .match
= vme_bus_match
,
1519 .probe
= vme_bus_probe
,
1520 .remove
= vme_bus_remove
,
1522 EXPORT_SYMBOL(vme_bus_type
);
1524 static int __init
vme_init(void)
1526 return bus_register(&vme_bus_type
);
1529 static void __exit
vme_exit(void)
1531 bus_unregister(&vme_bus_type
);
1534 MODULE_DESCRIPTION("VME bridge driver framework");
1535 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1536 MODULE_LICENSE("GPL");
1538 module_init(vme_init
);
1539 module_exit(vme_exit
);