/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include "vme.h"
#include "vme_bridge.h"
/* Bitmask and mutex to keep track of bridge numbers */
static unsigned int vme_bus_numbers;
DEFINE_MUTEX(vme_bus_num_mtx);

static void __exit vme_exit(void);
static int __init vme_init(void);
/*
 * Find the bridge resource associated with a specific device resource
 */
static struct vme_bridge *dev_to_bridge(struct device *dev)
{
	return dev->platform_data;
}
/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
			list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
			list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
			list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
			list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}
/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 *
 * XXX VME bridges could be available on buses other than PCI. At the moment
 *     this framework only supports PCI devices.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;
	struct pci_dev *pdev;

	if (resource == NULL) {
		printk("No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk("Can't find bridge\n");
		return NULL;
	}

	/* Find pci_dev container of dev */
	if (bridge->parent == NULL) {
		printk("Dev entry NULL\n");
		return NULL;
	}
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	return pci_alloc_consistent(pdev, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/*
 * Free previously allocated contiguous block of memory.
 *
 * XXX VME bridges could be available on buses other than PCI. At the moment
 *     this framework only supports PCI devices.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;
	struct pci_dev *pdev;

	if (resource == NULL) {
		printk("No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk("Can't find bridge\n");
		return;
	}

	/* Find pci_dev container of dev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	pci_free_consistent(pdev, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
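/*
 * Illustrative sketch (not part of this file): how a caller might pair
 * vme_alloc_consistent()/vme_free_consistent() to back a slave window with
 * a DMA-able buffer. "my_resource" and the 64K size are assumptions made up
 * for this example only.
 *
 *	dma_addr_t buf_base;
 *	void *vaddr;
 *
 *	vaddr = vme_alloc_consistent(my_resource, 0x10000, &buf_base);
 *	if (vaddr == NULL)
 *		return -ENOMEM;
 *
 *	... program the window with buf_base, access vaddr from the CPU ...
 *
 *	vme_free_consistent(my_resource, 0x10000, vaddr, buf_base);
 */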
size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
			&aspace, &cycle, &dwidth);

		return size;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
			&buf_base, &aspace, &cycle);

		return size;
	case VME_DMA:
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);
static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
	unsigned long long size)
{
	int retval = 0;

	switch (aspace) {
	case VME_A16:
		if (((vme_base + size) > VME_A16_MAX) ||
				(vme_base > VME_A16_MAX))
			retval = -EFAULT;
		break;
	case VME_A24:
		if (((vme_base + size) > VME_A24_MAX) ||
				(vme_base > VME_A24_MAX))
			retval = -EFAULT;
		break;
	case VME_A32:
		if (((vme_base + size) > VME_A32_MAX) ||
				(vme_base > VME_A32_MAX))
			retval = -EFAULT;
		break;
	case VME_A64:
		/*
		 * Any value held in an unsigned long long can be used as the
		 * base
		 */
		break;
	case VME_CRCSR:
		if (((vme_base + size) > VME_CRCSR_MAX) ||
				(vme_base > VME_CRCSR_MAX))
			retval = -EFAULT;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
		printk("Invalid address space\n");
		retval = -EINVAL;
		break;
	}

	return retval;
}
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_slave_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &(bridge->slave_resources)) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk("Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&(slave_image->mtx));
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&(slave_image->mtx));
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&(slave_image->mtx));
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &(allocated_image->list);

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&(slave_image->mtx));
	slave_image->locked = 0;
	mutex_unlock(&(slave_image->mtx));
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk("Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk("Function not supported\n");
		return -ENOSYS;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk("Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk("Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk("vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk("Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk("Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&(slave_image->mtx));
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&(slave_image->mtx));

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
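/*
 * Illustrative sketch (not part of this file): a typical slave window
 * lifecycle as seen by a driver bound to a VME slot. The device pointer,
 * window size and address/cycle attributes below are assumptions made up
 * for this example.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_base;
 *	void *buf;
 *
 *	res = vme_slave_request(dev, VME_A24, VME_SCT);
 *	if (res == NULL)
 *		return -ENODEV;
 *
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
 *	if (buf && vme_slave_set(res, 1, 0x0, 0x10000, buf_base,
 *			VME_A24, VME_SCT) == 0) {
 *		... the buffer is now visible on the VME bus ...
 *	}
 *
 *	vme_slave_set(res, 0, 0x0, 0x0, 0x0, VME_A24, VME_SCT);
 *	vme_free_consistent(res, 0x10000, buf, buf_base);
 *	vme_slave_free(res);
 */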
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_master_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &(bridge->master_resources)) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&(master_image->lock));
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&(master_image->lock));
			allocated_image = master_image;
			break;
		}
		spin_unlock(&(master_image->lock));
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &(allocated_image->list);

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&(master_image->lock));
	master_image->locked = 0;
	spin_unlock(&(master_image->lock));
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk("vme_master_set not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk("Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_get == NULL) {
		printk("vme_master_get not supported\n");
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);
/*
 * Read data out of VME space into a buffer.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk("Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk("Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);
/*
 * Write data out to VME space from a buffer.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk("Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk("Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk("RMW to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk("Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk("Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&(master_image->lock));
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&(master_image->lock));

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
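/*
 * Illustrative sketch (not part of this file): reading a device register
 * through a master window. The VME base address, size, offset and
 * attributes are assumptions made up for this example.
 *
 *	struct vme_resource *res;
 *	u32 reg;
 *
 *	res = vme_master_request(dev, VME_A32, VME_SCT, VME_D32);
 *	if (res == NULL)
 *		return -ENODEV;
 *
 *	if (vme_master_set(res, 1, 0x10000000, 0x10000,
 *			VME_A32, VME_SCT, VME_D32) == 0)
 *		vme_master_read(res, &reg, 4, 0x100);
 *
 *	vme_master_free(res);
 */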
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_request_dma(struct device *dev)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &(bridge->dma_resources)) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk("Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&(dma_ctrlr->mtx));
		if (dma_ctrlr->locked == 0) {
			dma_ctrlr->locked = 1;
			mutex_unlock(&(dma_ctrlr->mtx));
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&(dma_ctrlr->mtx));
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &(allocated_ctrlr->list);

	return resource;

err_alloc:
	/* Unlock controller */
	mutex_lock(&(dma_ctrlr->mtx));
	dma_ctrlr->locked = 0;
	mutex_unlock(&(dma_ctrlr->mtx));
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_request_dma);
/*
 * Allocate and initialise a new DMA list for the given DMA controller
 * resource.
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk("Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = (struct vme_dma_list *)kmalloc(
		sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk("Unable to allocate memory for new dma list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&(dma_list->entries));
	dma_list->parent = ctrlr;
	mutex_init(&(dma_list->mtx));

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);
/*
 * Create "Pattern" type attributes
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
	vme_pattern_t type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = (struct vme_dma_attr *)kmalloc(
		sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk("Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = (struct vme_dma_pattern *)kmalloc(
		sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk("Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
/*
 * Create "PCI" type attributes
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pci *pci_attr;

	/* XXX Run some sanity checks here */

	attributes = (struct vme_dma_attr *)kmalloc(
		sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk("Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pci_attr = (struct vme_dma_pci *)kmalloc(sizeof(struct vme_dma_pci),
		GFP_KERNEL);
	if (pci_attr == NULL) {
		printk("Unable to allocate memory for pci attributes\n");
		goto err_pci;
	}

	attributes->type = VME_DMA_PCI;
	attributes->private = (void *)pci_attr;

	pci_attr->address = address;

	return attributes;

err_pci:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);
/*
 * Create "VME" type attributes
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_vme *vme_attr;

	/* XXX Run some sanity checks here */

	attributes = (struct vme_dma_attr *)kmalloc(
		sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk("Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	vme_attr = (struct vme_dma_vme *)kmalloc(sizeof(struct vme_dma_vme),
		GFP_KERNEL);
	if (vme_attr == NULL) {
		printk("Unable to allocate memory for vme attributes\n");
		goto err_vme;
	}

	attributes->type = VME_DMA_VME;
	attributes->private = (void *)vme_attr;

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	return attributes;

err_vme:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
 * Free a previously allocated DMA attribute structure.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk("Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&(list->mtx))) {
		printk("Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&(list->mtx));

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk("Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&(list->mtx));

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&(list->mtx));

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk("Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&(list->mtx))) {
		printk("Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk("Unable to empty link-list entries\n");
		mutex_unlock(&(list->mtx));
		return retval;
	}
	mutex_unlock(&(list->mtx));
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk("Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&(ctrlr->mtx))) {
		printk("Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
		printk("Resource still processing transfers\n");
		mutex_unlock(&(ctrlr->mtx));
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&(ctrlr->mtx));

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
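/*
 * Illustrative sketch (not part of this file): building and running a
 * single-entry DMA list that copies from PCI memory to the VME bus.
 * "pci_buf" (a dma_addr_t), the VME address and the attributes are
 * assumptions made up for this example.
 *
 *	struct vme_resource *res;
 *	struct vme_dma_list *list;
 *	struct vme_dma_attr *src, *dest;
 *
 *	res = vme_request_dma(dev);
 *	list = vme_new_dma_list(res);
 *	src = vme_dma_pci_attribute(pci_buf);
 *	dest = vme_dma_vme_attribute(0x20000000, VME_A32, VME_SCT, VME_D32);
 *
 *	vme_dma_list_add(list, src, dest, 0x1000);
 *	vme_dma_list_exec(list);
 *
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dest);
 *	vme_dma_list_free(list);
 *	vme_dma_free(res);
 */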
int vme_request_irq(struct device *dev, int level, int statid,
	void (*callback)(int level, int vector, void *priv_data),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->request_irq == NULL) {
		printk("Registering interrupts not supported\n");
		return -EINVAL;
	}

	return bridge->request_irq(level, statid, callback, priv_data);
}
EXPORT_SYMBOL(vme_request_irq);
void vme_free_irq(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return;
	}

	if (bridge->free_irq == NULL) {
		printk("Freeing interrupts not supported\n");
		return;
	}

	bridge->free_irq(level, statid);
}
EXPORT_SYMBOL(vme_free_irq);
int vme_generate_irq(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->generate_irq == NULL) {
		printk("Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->generate_irq(level, statid);
}
EXPORT_SYMBOL(vme_generate_irq);
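/*
 * Illustrative sketch (not part of this file): registering a handler for
 * VME level 3, status/ID 0x20. The handler name, its private data and the
 * chosen level/statid are assumptions made up for this example; the
 * callback prototype matches vme_request_irq() above.
 *
 *	static void my_vme_handler(int level, int vector, void *priv)
 *	{
 *		... acknowledge the device that raised the interrupt ...
 *	}
 *
 *	if (vme_request_irq(dev, 3, 0x20, my_vme_handler, my_priv))
 *		return -EBUSY;
 *	...
 *	vme_free_irq(dev, 3, 0x20);
 */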
/*
 * Request the location monitor, return resource or NULL
 */
struct vme_resource *vme_lm_request(struct device *dev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &(bridge->lm_resources)) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor "
				"resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&(lm->mtx));
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&(lm->mtx));
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&(lm->mtx));
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &(allocated_lm->list);

	return resource;

err_alloc:
	/* Unlock location monitor */
	mutex_lock(&(lm->mtx));
	lm->locked = 0;
	mutex_unlock(&(lm->mtx));
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	vme_address_t aspace, vme_cycle_t cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_set == NULL) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	/* XXX Check parameters */

	return lm->parent->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	vme_address_t *aspace, vme_cycle_t *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_get == NULL) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);
int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(int))
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_attach == NULL) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_detach == NULL) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!mutex_trylock(&(lm->mtx))) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return;
	}

	/* XXX Check to see that there aren't any callbacks still attached */

	lm->locked = 0;

	mutex_unlock(&(lm->mtx));

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
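/*
 * Illustrative sketch (not part of this file): arming a location monitor on
 * an A24 address and attaching a callback to its first monitor slot. The
 * address, cycle attribute and callback name are assumptions made up for
 * this example.
 *
 *	struct vme_resource *res;
 *
 *	res = vme_lm_request(dev);
 *	if (res && vme_lm_set(res, 0x60000, VME_A24, VME_SCT) == 0)
 *		vme_lm_attach(res, 0, my_lm_callback);
 *	...
 *	vme_lm_detach(res, 0);
 *	vme_lm_free(res);
 */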
int vme_slot_get(struct device *bus)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(bus);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (bridge->slot_get == NULL) {
		printk("vme_slot_get not supported\n");
		return -EINVAL;
	}

	return bridge->slot_get();
}
EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */

static int vme_alloc_bus_num(void)
{
	int i;

	mutex_lock(&vme_bus_num_mtx);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if (((vme_bus_numbers >> i) & 0x1) == 0) {
			vme_bus_numbers |= (0x1 << i);
			break;
		}
	}
	mutex_unlock(&vme_bus_num_mtx);

	return i;
}

static void vme_free_bus_num(int bus)
{
	mutex_lock(&vme_bus_num_mtx);
	vme_bus_numbers &= ~(0x1 << bus);
	mutex_unlock(&vme_bus_num_mtx);
}
*bridge
)
1295 bridge
->num
= vme_alloc_bus_num();
1297 /* This creates 32 vme "slot" devices. This equates to a slot for each
1298 * ID available in a system conforming to the ANSI/VITA 1-1994
1301 for (i
= 0; i
< VME_SLOTS_MAX
; i
++) {
1302 dev
= &(bridge
->dev
[i
]);
1303 memset(dev
, 0, sizeof(struct device
));
1305 dev
->parent
= bridge
->parent
;
1306 dev
->bus
= &(vme_bus_type
);
1308 * We save a pointer to the bridge in platform_data so that we
1309 * can get to it later. We keep driver_data for use by the
1310 * driver that binds against the slot
1312 dev
->platform_data
= bridge
;
1313 dev_set_name(dev
, "vme-%x.%x", bridge
->num
, i
+ 1);
1315 retval
= device_register(dev
);
1325 dev
= &(bridge
->dev
[i
]);
1326 device_unregister(dev
);
1328 vme_free_bus_num(bridge
->num
);
1331 EXPORT_SYMBOL(vme_register_bridge
);
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	int i;
	struct device *dev;

	for (i = 0; i < VME_SLOTS_MAX; i++) {
		dev = &(bridge->dev[i]);
		device_unregister(dev);
	}
	vme_free_bus_num(bridge->num);
}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */

int vme_register_driver(struct vme_driver *drv)
{
	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(vme_register_driver);

void vme_unregister_driver(struct vme_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
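/*
 * Illustrative sketch (not part of this file): the shape of a driver that
 * registers against this bus. The driver name, callbacks and bind table
 * contents are assumptions made up for this example, as is the assumption
 * that the bind-table entry type declared alongside struct vme_driver is
 * struct vme_device_id with "bus" and "slot" members.
 *
 *	static struct vme_device_id my_ids[] = {
 *		{ .bus = 0, .slot = VME_SLOT_ALL },
 *		{ },
 *	};
 *
 *	static struct vme_driver my_driver = {
 *		.name = "my_vme_device",
 *		.bind_table = my_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	The driver's init and exit paths then call
 *	vme_register_driver(&my_driver) and vme_unregister_driver(&my_driver).
 */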
/* - Bus Registration ------------------------------------------------------ */

int vme_calc_slot(struct device *dev)
{
	struct vme_bridge *bridge;
	int num;

	bridge = dev_to_bridge(dev);

	/* Determine slot number */
	num = 0;
	while (num < VME_SLOTS_MAX) {
		if (&(bridge->dev[num]) == dev)
			break;

		num++;
	}
	if (num == VME_SLOTS_MAX) {
		dev_err(dev, "Failed to identify slot\n");
		num = 0;
		goto err_dev;
	}
	num++;

err_dev:
	return num;
}
static struct vme_driver *dev_to_vme_driver(struct device *dev)
{
	if (dev->driver == NULL)
		printk("Bugger dev->driver is NULL\n");

	return container_of(dev->driver, struct vme_driver, driver);
}
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int i, num;

	bridge = dev_to_bridge(dev);
	driver = container_of(drv, struct vme_driver, driver);

	num = vme_calc_slot(dev);
	if (!num)
		goto err_dev;

	if (driver->bind_table == NULL) {
		dev_err(dev, "Bind table NULL\n");
		goto err_table;
	}

	i = 0;
	while ((driver->bind_table[i].bus != 0) ||
		(driver->bind_table[i].slot != 0)) {

		if (bridge->num == driver->bind_table[i].bus) {
			if (num == driver->bind_table[i].slot)
				return 1;

			if (driver->bind_table[i].slot == VME_SLOT_ALL)
				return 1;

			if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
				(num == vme_slot_get(dev)))
				return 1;
		}
		i++;
	}

err_dev:
err_table:
	return 0;
}
static int vme_bus_probe(struct device *dev)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int retval = -ENODEV;

	driver = dev_to_vme_driver(dev);
	bridge = dev_to_bridge(dev);

	if (driver->probe != NULL)
		retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));

	return retval;
}
static int vme_bus_remove(struct device *dev)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int retval = -ENODEV;

	driver = dev_to_vme_driver(dev);
	bridge = dev_to_bridge(dev);

	if (driver->remove != NULL)
		retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));

	return retval;
}
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}

static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
MODULE_DESCRIPTION("VME bridge driver framework");
MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
MODULE_LICENSE("GPL");

module_init(vme_init);
module_exit(vme_exit);