/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "vme.h"
#include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

static void __exit vme_exit(void);
static int __init vme_init(void);
/*
 * Find the bridge resource associated with a specific device resource
 */
static struct vme_bridge *dev_to_bridge(struct device *dev)
{
	return dev->platform_data;
}
/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
			list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
			list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
			list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
			list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}
/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	if (bridge->alloc_consistent == NULL) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
			bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/*
 * Free previously allocated contiguous block of memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	if (bridge->free_consistent == NULL) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
			bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
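/*
 * Usage sketch (illustrative only, not part of this file): a slave-side
 * driver that already holds a slave resource might back a 64KB window with a
 * coherent buffer. The size and error handling here are assumptions.
 *
 *	dma_addr_t buf_dma;
 *	void *buf;
 *
 *	buf = vme_alloc_consistent(resource, 0x10000, &buf_dma);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...
 *	vme_free_consistent(resource, 0x10000, buf, buf_dma);
 */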
size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
			&aspace, &cycle, &dwidth);

		return size;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
			&buf_base, &aspace, &cycle);

		return size;
	case VME_DMA:
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);
static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
	unsigned long long size)
{
	int retval = 0;

	switch (aspace) {
	case VME_A16:
		if (((vme_base + size) > VME_A16_MAX) ||
				(vme_base > VME_A16_MAX))
			retval = -EFAULT;
		break;
	case VME_A24:
		if (((vme_base + size) > VME_A24_MAX) ||
				(vme_base > VME_A24_MAX))
			retval = -EFAULT;
		break;
	case VME_A32:
		if (((vme_base + size) > VME_A32_MAX) ||
				(vme_base > VME_A32_MAX))
			retval = -EFAULT;
		break;
	case VME_A64:
		/*
		 * Any value held in an unsigned long long can be used as the
		 * base
		 */
		break;
	case VME_CRCSR:
		if (((vme_base + size) > VME_CRCSR_MAX) ||
				(vme_base > VME_CRCSR_MAX))
			retval = -EFAULT;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		break;
	}

	return retval;
}
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_slave_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
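/*
 * Example (illustrative sketch, not defined by this file): a driver exposing
 * a 64KB A24 slave window backed by a coherent buffer. The device pointer,
 * window base and the cycle constants picked from vme.h are assumptions.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_dma;
 *	void *buf;
 *	int retval;
 *
 *	res = vme_slave_request(dev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	if (res == NULL)
 *		return -ENODEV;
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_dma);
 *	retval = vme_slave_set(res, 1, 0x0, 0x10000, buf_dma, VME_A24,
 *		VME_SCT | VME_USER | VME_DATA);
 */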
int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_master_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_get == NULL) {
		printk(KERN_WARNING "vme_master_get not supported\n");
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);
/*
 * Read data out of VME space into a buffer.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);
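/*
 * Illustrative master-window access (a sketch, not part of this file):
 * request an A32 window, place it over the target region and copy data out.
 * The device pointer, VME base address, window length and the constants
 * taken from vme.h are assumptions for the example.
 *
 *	struct vme_resource *res;
 *	u8 data[64];
 *	int retval;
 *
 *	res = vme_master_request(dev, VME_A32,
 *		VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	if (res == NULL)
 *		return -ENODEV;
 *	retval = vme_master_set(res, 1, 0x20000000, 0x10000, VME_A32,
 *		VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	if (retval == 0)
 *		retval = vme_master_read(res, data, sizeof(data), 0);
 */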
/*
 * Write data out to VME space from a buffer.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "RMW cycles not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock controller */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = ctrlr;
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);
/*
 * Create "Pattern" type attributes
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
	vme_pattern_t type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
/*
 * Create "PCI" type attributes
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pci *pci_attr;

	/* XXX Run some sanity checks here */

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
	if (pci_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
		goto err_pci;
	}

	attributes->type = VME_DMA_PCI;
	attributes->private = (void *)pci_attr;

	pci_attr->address = address;

	return attributes;

err_pci:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);
/*
 * Create "VME" type attributes
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_vme *vme_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
	if (vme_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
		goto err_vme;
	}

	attributes->type = VME_DMA_VME;
	attributes->private = (void *)vme_attr;

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	return attributes;

err_vme:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
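/*
 * Illustrative link-list DMA usage (a sketch with assumed values): build a
 * single-entry list copying 4KB from a PCI buffer to an A32 VME address and
 * execute it. pci_buf_dma, the VME address and the vme.h constants used are
 * assumptions, and error handling is omitted for brevity.
 *
 *	struct vme_dma_list *list;
 *	struct vme_dma_attr *src, *dest;
 *	int retval;
 *
 *	list = vme_new_dma_list(resource);
 *	src = vme_dma_pci_attribute(pci_buf_dma);
 *	dest = vme_dma_vme_attribute(0x20000000, VME_A32,
 *		VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	retval = vme_dma_list_add(list, src, dest, 0x1000);
 *	if (retval == 0)
 *		retval = vme_dma_list_exec(list);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dest);
 *	vme_dma_list_free(list);
 */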
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*call)(int, int, void *);
	void *priv_data;

	call = bridge->irq[level - 1].callback[statid].func;
	priv_data = bridge->irq[level - 1].callback[statid].priv_data;

	if (call != NULL)
		call(level, statid, priv_data);
	else
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
			level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);
int vme_irq_request(struct device *dev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
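/*
 * Illustrative interrupt hook-up (handler name, level and status/ID are
 * assumptions for the sketch): the callback receives the level and
 * status/ID that fired plus the priv_data pointer registered here.
 *
 *	static void my_vme_isr(int level, int statid, void *priv)
 *	{
 *		...
 *	}
 *
 *	retval = vme_irq_request(dev, 3, 0x20, my_vme_isr, priv);
 *	...
 *	vme_irq_free(dev, 3, 0x20);
 */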
void vme_irq_free(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
int vme_irq_generate(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_generate == NULL) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);
/*
 * Request the location monitor, return resource or NULL
 */
struct vme_resource *vme_lm_request(struct device *dev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock location monitor */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	vme_address_t aspace, vme_cycle_t cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_set == NULL) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	vme_address_t *aspace, vme_cycle_t *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_get == NULL) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);
int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(int))
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_attach == NULL) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);
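/*
 * Illustrative location monitor usage (base address, address space and
 * callback name are assumptions): reserve the monitor, point it at an A24
 * region and attach a callback to the first monitored location.
 *
 *	res = vme_lm_request(dev);
 *	count = vme_lm_count(res);
 *	retval = vme_lm_set(res, 0x60000, VME_A24,
 *		VME_SCT | VME_USER | VME_DATA);
 *	retval = vme_lm_attach(res, 0, my_lm_callback);
 */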
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_detach == NULL) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
int vme_slot_get(struct device *bus)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(bus);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (bridge->slot_get == NULL) {
		printk(KERN_WARNING "vme_slot_get not supported\n");
		return -EINVAL;
	}

	return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */

static int vme_add_bus(struct vme_bridge *bridge)
{
	int i;
	int ret = -1;

	mutex_lock(&vme_buses_lock);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&vme_buses_lock);

	return ret;
}

static void vme_remove_bus(struct vme_bridge *bridge)
{
	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
int vme_register_bridge(struct vme_bridge *bridge)
{
	struct device *dev;
	int retval;
	int i;

	retval = vme_add_bus(bridge);
	if (retval)
		return retval;

	/* This creates 32 vme "slot" devices. This equates to a slot for each
	 * ID available in a system conforming to the ANSI/VITA 1-1994
	 * specification.
	 */
	for (i = 0; i < VME_SLOTS_MAX; i++) {
		dev = &bridge->dev[i];
		memset(dev, 0, sizeof(struct device));

		dev->parent = bridge->parent;
		dev->bus = &vme_bus_type;
		/*
		 * We save a pointer to the bridge in platform_data so that we
		 * can get to it later. We keep driver_data for use by the
		 * driver that binds against the slot
		 */
		dev->platform_data = bridge;
		dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);

		retval = device_register(dev);
		if (retval)
			goto err_reg;
	}

	return retval;

err_reg:
	/* Unwind the slot devices registered so far */
	while (--i >= 0) {
		dev = &bridge->dev[i];
		device_unregister(dev);
	}
	vme_remove_bus(bridge);

	return retval;
}
EXPORT_SYMBOL(vme_register_bridge);
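/*
 * Registration sketch for a bridge driver (all names beyond the fields used
 * in this file are hypothetical; real bridge drivers fill this in from their
 * PCI probe routine):
 *
 *	bridge->parent = &pdev->dev;
 *	INIT_LIST_HEAD(&bridge->master_resources);
 *	INIT_LIST_HEAD(&bridge->slave_resources);
 *	...populate the resource lists and the master_set/slave_set/irq_set/
 *	   alloc_consistent/... function pointers...
 *	retval = vme_register_bridge(bridge);
 */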
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	int i;
	struct device *dev;

	for (i = 0; i < VME_SLOTS_MAX; i++) {
		dev = &bridge->dev[i];
		device_unregister(dev);
	}
	vme_remove_bus(bridge);
}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */

int vme_register_driver(struct vme_driver *drv)
{
	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(vme_register_driver);

void vme_unregister_driver(struct vme_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
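/*
 * Sketch of a VME device driver registering against this bus (names are
 * hypothetical). The bind table is a zero-terminated array of bus/slot
 * entries as defined in vme.h; vme_bus_match() below scans it until it hits
 * the all-zero terminator.
 *
 *	static struct vme_driver my_driver = {
 *		.name = "my_vme_device",
 *		.bind_table = my_bind_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	retval = vme_register_driver(&my_driver);
 *	...
 *	vme_unregister_driver(&my_driver);
 */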
/* - Bus Registration ------------------------------------------------------ */

static int vme_calc_slot(struct device *dev)
{
	struct vme_bridge *bridge;
	int num;

	bridge = dev_to_bridge(dev);

	/* Determine slot number */
	num = 0;
	while (num < VME_SLOTS_MAX) {
		if (&bridge->dev[num] == dev)
			break;

		num++;
	}
	if (num == VME_SLOTS_MAX) {
		dev_err(dev, "Failed to identify slot\n");
		num = 0;
		goto err_dev;
	}
	num++;

err_dev:
	return num;
}
static struct vme_driver *dev_to_vme_driver(struct device *dev)
{
	if (dev->driver == NULL)
		printk(KERN_ERR "dev->driver is NULL\n");

	return container_of(dev->driver, struct vme_driver, driver);
}
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int i, num;

	bridge = dev_to_bridge(dev);
	driver = container_of(drv, struct vme_driver, driver);

	num = vme_calc_slot(dev);
	if (!num)
		goto err_dev;

	if (driver->bind_table == NULL) {
		dev_err(dev, "Bind table NULL\n");
		goto err_table;
	}

	i = 0;
	while ((driver->bind_table[i].bus != 0) ||
		(driver->bind_table[i].slot != 0)) {

		if (bridge->num == driver->bind_table[i].bus) {
			if (num == driver->bind_table[i].slot)
				return 1;

			if (driver->bind_table[i].slot == VME_SLOT_ALL)
				return 1;

			if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
				(num == vme_slot_get(dev)))
				return 1;
		}
		i++;
	}

err_dev:
err_table:
	return 0;
}
static int vme_bus_probe(struct device *dev)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int retval = -ENODEV;

	driver = dev_to_vme_driver(dev);
	bridge = dev_to_bridge(dev);

	if (driver->probe != NULL)
		retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));

	return retval;
}

static int vme_bus_remove(struct device *dev)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int retval = -ENODEV;

	driver = dev_to_vme_driver(dev);
	bridge = dev_to_bridge(dev);

	if (driver->remove != NULL)
		retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));

	return retval;
}
= {
1527 .match
= vme_bus_match
,
1528 .probe
= vme_bus_probe
,
1529 .remove
= vme_bus_remove
,
1531 EXPORT_SYMBOL(vme_bus_type
);
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}

static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}

MODULE_DESCRIPTION("VME bridge driver framework");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_init);
module_exit(vme_exit);