Add linux-next specific files for 20110831
[linux-2.6/next.git] / drivers / staging / vme / vme.c
blobb2968f8588ff2644e280cc76bd981b7752b8be97
1 /*
2 * VME Bridge Framework
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
34 #include "vme.h"
35 #include "vme_bridge.h"
37 /* Bitmask and list of registered buses both protected by common mutex */
38 static unsigned int vme_bus_numbers;
39 static LIST_HEAD(vme_bus_list);
40 static DEFINE_MUTEX(vme_buses_lock);
42 static void __exit vme_exit(void);
43 static int __init vme_init(void);
47 * Find the bridge resource associated with a specific device resource
49 static struct vme_bridge *dev_to_bridge(struct device *dev)
51 return dev->platform_data;
55 * Find the bridge that the resource is associated with.
57 static struct vme_bridge *find_bridge(struct vme_resource *resource)
59 /* Get list to search */
60 switch (resource->type) {
61 case VME_MASTER:
62 return list_entry(resource->entry, struct vme_master_resource,
63 list)->parent;
64 break;
65 case VME_SLAVE:
66 return list_entry(resource->entry, struct vme_slave_resource,
67 list)->parent;
68 break;
69 case VME_DMA:
70 return list_entry(resource->entry, struct vme_dma_resource,
71 list)->parent;
72 break;
73 case VME_LM:
74 return list_entry(resource->entry, struct vme_lm_resource,
75 list)->parent;
76 break;
77 default:
78 printk(KERN_ERR "Unknown resource type\n");
79 return NULL;
80 break;
85 * Allocate a contiguous block of memory for use by the driver. This is used to
86 * create the buffers for the slave windows.
88 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
89 dma_addr_t *dma)
91 struct vme_bridge *bridge;
93 if (resource == NULL) {
94 printk(KERN_ERR "No resource\n");
95 return NULL;
98 bridge = find_bridge(resource);
99 if (bridge == NULL) {
100 printk(KERN_ERR "Can't find bridge\n");
101 return NULL;
104 if (bridge->parent == NULL) {
105 printk(KERN_ERR "Dev entry NULL for"
106 " bridge %s\n", bridge->name);
107 return NULL;
110 if (bridge->alloc_consistent == NULL) {
111 printk(KERN_ERR "alloc_consistent not supported by"
112 " bridge %s\n", bridge->name);
113 return NULL;
116 return bridge->alloc_consistent(bridge->parent, size, dma);
118 EXPORT_SYMBOL(vme_alloc_consistent);
121 * Free previously allocated contiguous block of memory.
123 void vme_free_consistent(struct vme_resource *resource, size_t size,
124 void *vaddr, dma_addr_t dma)
126 struct vme_bridge *bridge;
128 if (resource == NULL) {
129 printk(KERN_ERR "No resource\n");
130 return;
133 bridge = find_bridge(resource);
134 if (bridge == NULL) {
135 printk(KERN_ERR "Can't find bridge\n");
136 return;
139 if (bridge->parent == NULL) {
140 printk(KERN_ERR "Dev entry NULL for"
141 " bridge %s\n", bridge->name);
142 return;
145 if (bridge->free_consistent == NULL) {
146 printk(KERN_ERR "free_consistent not supported by"
147 " bridge %s\n", bridge->name);
148 return;
151 bridge->free_consistent(bridge->parent, size, vaddr, dma);
153 EXPORT_SYMBOL(vme_free_consistent);
155 size_t vme_get_size(struct vme_resource *resource)
157 int enabled, retval;
158 unsigned long long base, size;
159 dma_addr_t buf_base;
160 vme_address_t aspace;
161 vme_cycle_t cycle;
162 vme_width_t dwidth;
164 switch (resource->type) {
165 case VME_MASTER:
166 retval = vme_master_get(resource, &enabled, &base, &size,
167 &aspace, &cycle, &dwidth);
169 return size;
170 break;
171 case VME_SLAVE:
172 retval = vme_slave_get(resource, &enabled, &base, &size,
173 &buf_base, &aspace, &cycle);
175 return size;
176 break;
177 case VME_DMA:
178 return 0;
179 break;
180 default:
181 printk(KERN_ERR "Unknown resource type\n");
182 return 0;
183 break;
186 EXPORT_SYMBOL(vme_get_size);
188 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
189 unsigned long long size)
191 int retval = 0;
193 switch (aspace) {
194 case VME_A16:
195 if (((vme_base + size) > VME_A16_MAX) ||
196 (vme_base > VME_A16_MAX))
197 retval = -EFAULT;
198 break;
199 case VME_A24:
200 if (((vme_base + size) > VME_A24_MAX) ||
201 (vme_base > VME_A24_MAX))
202 retval = -EFAULT;
203 break;
204 case VME_A32:
205 if (((vme_base + size) > VME_A32_MAX) ||
206 (vme_base > VME_A32_MAX))
207 retval = -EFAULT;
208 break;
209 case VME_A64:
211 * Any value held in an unsigned long long can be used as the
212 * base
214 break;
215 case VME_CRCSR:
216 if (((vme_base + size) > VME_CRCSR_MAX) ||
217 (vme_base > VME_CRCSR_MAX))
218 retval = -EFAULT;
219 break;
220 case VME_USER1:
221 case VME_USER2:
222 case VME_USER3:
223 case VME_USER4:
224 /* User Defined */
225 break;
226 default:
227 printk(KERN_ERR "Invalid address space\n");
228 retval = -EINVAL;
229 break;
232 return retval;
236 * Request a slave image with specific attributes, return some unique
237 * identifier.
239 struct vme_resource *vme_slave_request(struct device *dev,
240 vme_address_t address, vme_cycle_t cycle)
242 struct vme_bridge *bridge;
243 struct list_head *slave_pos = NULL;
244 struct vme_slave_resource *allocated_image = NULL;
245 struct vme_slave_resource *slave_image = NULL;
246 struct vme_resource *resource = NULL;
248 bridge = dev_to_bridge(dev);
249 if (bridge == NULL) {
250 printk(KERN_ERR "Can't find VME bus\n");
251 goto err_bus;
254 /* Loop through slave resources */
255 list_for_each(slave_pos, &bridge->slave_resources) {
256 slave_image = list_entry(slave_pos,
257 struct vme_slave_resource, list);
259 if (slave_image == NULL) {
260 printk(KERN_ERR "Registered NULL Slave resource\n");
261 continue;
264 /* Find an unlocked and compatible image */
265 mutex_lock(&slave_image->mtx);
266 if (((slave_image->address_attr & address) == address) &&
267 ((slave_image->cycle_attr & cycle) == cycle) &&
268 (slave_image->locked == 0)) {
270 slave_image->locked = 1;
271 mutex_unlock(&slave_image->mtx);
272 allocated_image = slave_image;
273 break;
275 mutex_unlock(&slave_image->mtx);
278 /* No free image */
279 if (allocated_image == NULL)
280 goto err_image;
282 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
283 if (resource == NULL) {
284 printk(KERN_WARNING "Unable to allocate resource structure\n");
285 goto err_alloc;
287 resource->type = VME_SLAVE;
288 resource->entry = &allocated_image->list;
290 return resource;
292 err_alloc:
293 /* Unlock image */
294 mutex_lock(&slave_image->mtx);
295 slave_image->locked = 0;
296 mutex_unlock(&slave_image->mtx);
297 err_image:
298 err_bus:
299 return NULL;
301 EXPORT_SYMBOL(vme_slave_request);
303 int vme_slave_set(struct vme_resource *resource, int enabled,
304 unsigned long long vme_base, unsigned long long size,
305 dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
307 struct vme_bridge *bridge = find_bridge(resource);
308 struct vme_slave_resource *image;
309 int retval;
311 if (resource->type != VME_SLAVE) {
312 printk(KERN_ERR "Not a slave resource\n");
313 return -EINVAL;
316 image = list_entry(resource->entry, struct vme_slave_resource, list);
318 if (bridge->slave_set == NULL) {
319 printk(KERN_ERR "Function not supported\n");
320 return -ENOSYS;
323 if (!(((image->address_attr & aspace) == aspace) &&
324 ((image->cycle_attr & cycle) == cycle))) {
325 printk(KERN_ERR "Invalid attributes\n");
326 return -EINVAL;
329 retval = vme_check_window(aspace, vme_base, size);
330 if (retval)
331 return retval;
333 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
334 aspace, cycle);
336 EXPORT_SYMBOL(vme_slave_set);
338 int vme_slave_get(struct vme_resource *resource, int *enabled,
339 unsigned long long *vme_base, unsigned long long *size,
340 dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
342 struct vme_bridge *bridge = find_bridge(resource);
343 struct vme_slave_resource *image;
345 if (resource->type != VME_SLAVE) {
346 printk(KERN_ERR "Not a slave resource\n");
347 return -EINVAL;
350 image = list_entry(resource->entry, struct vme_slave_resource, list);
352 if (bridge->slave_get == NULL) {
353 printk(KERN_ERR "vme_slave_get not supported\n");
354 return -EINVAL;
357 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
358 aspace, cycle);
360 EXPORT_SYMBOL(vme_slave_get);
362 void vme_slave_free(struct vme_resource *resource)
364 struct vme_slave_resource *slave_image;
366 if (resource->type != VME_SLAVE) {
367 printk(KERN_ERR "Not a slave resource\n");
368 return;
371 slave_image = list_entry(resource->entry, struct vme_slave_resource,
372 list);
373 if (slave_image == NULL) {
374 printk(KERN_ERR "Can't find slave resource\n");
375 return;
378 /* Unlock image */
379 mutex_lock(&slave_image->mtx);
380 if (slave_image->locked == 0)
381 printk(KERN_ERR "Image is already free\n");
383 slave_image->locked = 0;
384 mutex_unlock(&slave_image->mtx);
386 /* Free up resource memory */
387 kfree(resource);
389 EXPORT_SYMBOL(vme_slave_free);
392 * Request a master image with specific attributes, return some unique
393 * identifier.
395 struct vme_resource *vme_master_request(struct device *dev,
396 vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
398 struct vme_bridge *bridge;
399 struct list_head *master_pos = NULL;
400 struct vme_master_resource *allocated_image = NULL;
401 struct vme_master_resource *master_image = NULL;
402 struct vme_resource *resource = NULL;
404 bridge = dev_to_bridge(dev);
405 if (bridge == NULL) {
406 printk(KERN_ERR "Can't find VME bus\n");
407 goto err_bus;
410 /* Loop through master resources */
411 list_for_each(master_pos, &bridge->master_resources) {
412 master_image = list_entry(master_pos,
413 struct vme_master_resource, list);
415 if (master_image == NULL) {
416 printk(KERN_WARNING "Registered NULL master resource\n");
417 continue;
420 /* Find an unlocked and compatible image */
421 spin_lock(&master_image->lock);
422 if (((master_image->address_attr & address) == address) &&
423 ((master_image->cycle_attr & cycle) == cycle) &&
424 ((master_image->width_attr & dwidth) == dwidth) &&
425 (master_image->locked == 0)) {
427 master_image->locked = 1;
428 spin_unlock(&master_image->lock);
429 allocated_image = master_image;
430 break;
432 spin_unlock(&master_image->lock);
435 /* Check to see if we found a resource */
436 if (allocated_image == NULL) {
437 printk(KERN_ERR "Can't find a suitable resource\n");
438 goto err_image;
441 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
442 if (resource == NULL) {
443 printk(KERN_ERR "Unable to allocate resource structure\n");
444 goto err_alloc;
446 resource->type = VME_MASTER;
447 resource->entry = &allocated_image->list;
449 return resource;
451 err_alloc:
452 /* Unlock image */
453 spin_lock(&master_image->lock);
454 master_image->locked = 0;
455 spin_unlock(&master_image->lock);
456 err_image:
457 err_bus:
458 return NULL;
460 EXPORT_SYMBOL(vme_master_request);
462 int vme_master_set(struct vme_resource *resource, int enabled,
463 unsigned long long vme_base, unsigned long long size,
464 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
466 struct vme_bridge *bridge = find_bridge(resource);
467 struct vme_master_resource *image;
468 int retval;
470 if (resource->type != VME_MASTER) {
471 printk(KERN_ERR "Not a master resource\n");
472 return -EINVAL;
475 image = list_entry(resource->entry, struct vme_master_resource, list);
477 if (bridge->master_set == NULL) {
478 printk(KERN_WARNING "vme_master_set not supported\n");
479 return -EINVAL;
482 if (!(((image->address_attr & aspace) == aspace) &&
483 ((image->cycle_attr & cycle) == cycle) &&
484 ((image->width_attr & dwidth) == dwidth))) {
485 printk(KERN_WARNING "Invalid attributes\n");
486 return -EINVAL;
489 retval = vme_check_window(aspace, vme_base, size);
490 if (retval)
491 return retval;
493 return bridge->master_set(image, enabled, vme_base, size, aspace,
494 cycle, dwidth);
496 EXPORT_SYMBOL(vme_master_set);
498 int vme_master_get(struct vme_resource *resource, int *enabled,
499 unsigned long long *vme_base, unsigned long long *size,
500 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
502 struct vme_bridge *bridge = find_bridge(resource);
503 struct vme_master_resource *image;
505 if (resource->type != VME_MASTER) {
506 printk(KERN_ERR "Not a master resource\n");
507 return -EINVAL;
510 image = list_entry(resource->entry, struct vme_master_resource, list);
512 if (bridge->master_get == NULL) {
513 printk(KERN_WARNING "vme_master_set not supported\n");
514 return -EINVAL;
517 return bridge->master_get(image, enabled, vme_base, size, aspace,
518 cycle, dwidth);
520 EXPORT_SYMBOL(vme_master_get);
523 * Read data out of VME space into a buffer.
525 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
526 loff_t offset)
528 struct vme_bridge *bridge = find_bridge(resource);
529 struct vme_master_resource *image;
530 size_t length;
532 if (bridge->master_read == NULL) {
533 printk(KERN_WARNING "Reading from resource not supported\n");
534 return -EINVAL;
537 if (resource->type != VME_MASTER) {
538 printk(KERN_ERR "Not a master resource\n");
539 return -EINVAL;
542 image = list_entry(resource->entry, struct vme_master_resource, list);
544 length = vme_get_size(resource);
546 if (offset > length) {
547 printk(KERN_WARNING "Invalid Offset\n");
548 return -EFAULT;
551 if ((offset + count) > length)
552 count = length - offset;
554 return bridge->master_read(image, buf, count, offset);
557 EXPORT_SYMBOL(vme_master_read);
560 * Write data out to VME space from a buffer.
562 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
563 size_t count, loff_t offset)
565 struct vme_bridge *bridge = find_bridge(resource);
566 struct vme_master_resource *image;
567 size_t length;
569 if (bridge->master_write == NULL) {
570 printk(KERN_WARNING "Writing to resource not supported\n");
571 return -EINVAL;
574 if (resource->type != VME_MASTER) {
575 printk(KERN_ERR "Not a master resource\n");
576 return -EINVAL;
579 image = list_entry(resource->entry, struct vme_master_resource, list);
581 length = vme_get_size(resource);
583 if (offset > length) {
584 printk(KERN_WARNING "Invalid Offset\n");
585 return -EFAULT;
588 if ((offset + count) > length)
589 count = length - offset;
591 return bridge->master_write(image, buf, count, offset);
593 EXPORT_SYMBOL(vme_master_write);
596 * Perform RMW cycle to provided location.
598 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
599 unsigned int compare, unsigned int swap, loff_t offset)
601 struct vme_bridge *bridge = find_bridge(resource);
602 struct vme_master_resource *image;
604 if (bridge->master_rmw == NULL) {
605 printk(KERN_WARNING "Writing to resource not supported\n");
606 return -EINVAL;
609 if (resource->type != VME_MASTER) {
610 printk(KERN_ERR "Not a master resource\n");
611 return -EINVAL;
614 image = list_entry(resource->entry, struct vme_master_resource, list);
616 return bridge->master_rmw(image, mask, compare, swap, offset);
618 EXPORT_SYMBOL(vme_master_rmw);
620 void vme_master_free(struct vme_resource *resource)
622 struct vme_master_resource *master_image;
624 if (resource->type != VME_MASTER) {
625 printk(KERN_ERR "Not a master resource\n");
626 return;
629 master_image = list_entry(resource->entry, struct vme_master_resource,
630 list);
631 if (master_image == NULL) {
632 printk(KERN_ERR "Can't find master resource\n");
633 return;
636 /* Unlock image */
637 spin_lock(&master_image->lock);
638 if (master_image->locked == 0)
639 printk(KERN_ERR "Image is already free\n");
641 master_image->locked = 0;
642 spin_unlock(&master_image->lock);
644 /* Free up resource memory */
645 kfree(resource);
647 EXPORT_SYMBOL(vme_master_free);
650 * Request a DMA controller with specific attributes, return some unique
651 * identifier.
653 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
655 struct vme_bridge *bridge;
656 struct list_head *dma_pos = NULL;
657 struct vme_dma_resource *allocated_ctrlr = NULL;
658 struct vme_dma_resource *dma_ctrlr = NULL;
659 struct vme_resource *resource = NULL;
661 /* XXX Not checking resource attributes */
662 printk(KERN_ERR "No VME resource Attribute tests done\n");
664 bridge = dev_to_bridge(dev);
665 if (bridge == NULL) {
666 printk(KERN_ERR "Can't find VME bus\n");
667 goto err_bus;
670 /* Loop through DMA resources */
671 list_for_each(dma_pos, &bridge->dma_resources) {
672 dma_ctrlr = list_entry(dma_pos,
673 struct vme_dma_resource, list);
675 if (dma_ctrlr == NULL) {
676 printk(KERN_ERR "Registered NULL DMA resource\n");
677 continue;
680 /* Find an unlocked and compatible controller */
681 mutex_lock(&dma_ctrlr->mtx);
682 if (((dma_ctrlr->route_attr & route) == route) &&
683 (dma_ctrlr->locked == 0)) {
685 dma_ctrlr->locked = 1;
686 mutex_unlock(&dma_ctrlr->mtx);
687 allocated_ctrlr = dma_ctrlr;
688 break;
690 mutex_unlock(&dma_ctrlr->mtx);
693 /* Check to see if we found a resource */
694 if (allocated_ctrlr == NULL)
695 goto err_ctrlr;
697 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
698 if (resource == NULL) {
699 printk(KERN_WARNING "Unable to allocate resource structure\n");
700 goto err_alloc;
702 resource->type = VME_DMA;
703 resource->entry = &allocated_ctrlr->list;
705 return resource;
707 err_alloc:
708 /* Unlock image */
709 mutex_lock(&dma_ctrlr->mtx);
710 dma_ctrlr->locked = 0;
711 mutex_unlock(&dma_ctrlr->mtx);
712 err_ctrlr:
713 err_bus:
714 return NULL;
716 EXPORT_SYMBOL(vme_dma_request);
719 * Start new list
721 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
723 struct vme_dma_resource *ctrlr;
724 struct vme_dma_list *dma_list;
726 if (resource->type != VME_DMA) {
727 printk(KERN_ERR "Not a DMA resource\n");
728 return NULL;
731 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
733 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
734 if (dma_list == NULL) {
735 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
736 return NULL;
738 INIT_LIST_HEAD(&dma_list->entries);
739 dma_list->parent = ctrlr;
740 mutex_init(&dma_list->mtx);
742 return dma_list;
744 EXPORT_SYMBOL(vme_new_dma_list);
747 * Create "Pattern" type attributes
749 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
750 vme_pattern_t type)
752 struct vme_dma_attr *attributes;
753 struct vme_dma_pattern *pattern_attr;
755 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
756 if (attributes == NULL) {
757 printk(KERN_ERR "Unable to allocate memory for attributes "
758 "structure\n");
759 goto err_attr;
762 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
763 if (pattern_attr == NULL) {
764 printk(KERN_ERR "Unable to allocate memory for pattern "
765 "attributes\n");
766 goto err_pat;
769 attributes->type = VME_DMA_PATTERN;
770 attributes->private = (void *)pattern_attr;
772 pattern_attr->pattern = pattern;
773 pattern_attr->type = type;
775 return attributes;
777 err_pat:
778 kfree(attributes);
779 err_attr:
780 return NULL;
782 EXPORT_SYMBOL(vme_dma_pattern_attribute);
785 * Create "PCI" type attributes
787 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
789 struct vme_dma_attr *attributes;
790 struct vme_dma_pci *pci_attr;
792 /* XXX Run some sanity checks here */
794 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
795 if (attributes == NULL) {
796 printk(KERN_ERR "Unable to allocate memory for attributes "
797 "structure\n");
798 goto err_attr;
801 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
802 if (pci_attr == NULL) {
803 printk(KERN_ERR "Unable to allocate memory for pci "
804 "attributes\n");
805 goto err_pci;
810 attributes->type = VME_DMA_PCI;
811 attributes->private = (void *)pci_attr;
813 pci_attr->address = address;
815 return attributes;
817 err_pci:
818 kfree(attributes);
819 err_attr:
820 return NULL;
822 EXPORT_SYMBOL(vme_dma_pci_attribute);
825 * Create "VME" type attributes
827 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
828 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
830 struct vme_dma_attr *attributes;
831 struct vme_dma_vme *vme_attr;
833 attributes = kmalloc(
834 sizeof(struct vme_dma_attr), GFP_KERNEL);
835 if (attributes == NULL) {
836 printk(KERN_ERR "Unable to allocate memory for attributes "
837 "structure\n");
838 goto err_attr;
841 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
842 if (vme_attr == NULL) {
843 printk(KERN_ERR "Unable to allocate memory for vme "
844 "attributes\n");
845 goto err_vme;
848 attributes->type = VME_DMA_VME;
849 attributes->private = (void *)vme_attr;
851 vme_attr->address = address;
852 vme_attr->aspace = aspace;
853 vme_attr->cycle = cycle;
854 vme_attr->dwidth = dwidth;
856 return attributes;
858 err_vme:
859 kfree(attributes);
860 err_attr:
861 return NULL;
863 EXPORT_SYMBOL(vme_dma_vme_attribute);
866 * Free attribute
868 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
870 kfree(attributes->private);
871 kfree(attributes);
873 EXPORT_SYMBOL(vme_dma_free_attribute);
875 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
876 struct vme_dma_attr *dest, size_t count)
878 struct vme_bridge *bridge = list->parent->parent;
879 int retval;
881 if (bridge->dma_list_add == NULL) {
882 printk(KERN_WARNING "Link List DMA generation not supported\n");
883 return -EINVAL;
886 if (!mutex_trylock(&list->mtx)) {
887 printk(KERN_ERR "Link List already submitted\n");
888 return -EINVAL;
891 retval = bridge->dma_list_add(list, src, dest, count);
893 mutex_unlock(&list->mtx);
895 return retval;
897 EXPORT_SYMBOL(vme_dma_list_add);
899 int vme_dma_list_exec(struct vme_dma_list *list)
901 struct vme_bridge *bridge = list->parent->parent;
902 int retval;
904 if (bridge->dma_list_exec == NULL) {
905 printk(KERN_ERR "Link List DMA execution not supported\n");
906 return -EINVAL;
909 mutex_lock(&list->mtx);
911 retval = bridge->dma_list_exec(list);
913 mutex_unlock(&list->mtx);
915 return retval;
917 EXPORT_SYMBOL(vme_dma_list_exec);
919 int vme_dma_list_free(struct vme_dma_list *list)
921 struct vme_bridge *bridge = list->parent->parent;
922 int retval;
924 if (bridge->dma_list_empty == NULL) {
925 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
926 return -EINVAL;
929 if (!mutex_trylock(&list->mtx)) {
930 printk(KERN_ERR "Link List in use\n");
931 return -EINVAL;
935 * Empty out all of the entries from the dma list. We need to go to the
936 * low level driver as dma entries are driver specific.
938 retval = bridge->dma_list_empty(list);
939 if (retval) {
940 printk(KERN_ERR "Unable to empty link-list entries\n");
941 mutex_unlock(&list->mtx);
942 return retval;
944 mutex_unlock(&list->mtx);
945 kfree(list);
947 return retval;
949 EXPORT_SYMBOL(vme_dma_list_free);
951 int vme_dma_free(struct vme_resource *resource)
953 struct vme_dma_resource *ctrlr;
955 if (resource->type != VME_DMA) {
956 printk(KERN_ERR "Not a DMA resource\n");
957 return -EINVAL;
960 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
962 if (!mutex_trylock(&ctrlr->mtx)) {
963 printk(KERN_ERR "Resource busy, can't free\n");
964 return -EBUSY;
967 if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
968 printk(KERN_WARNING "Resource still processing transfers\n");
969 mutex_unlock(&ctrlr->mtx);
970 return -EBUSY;
973 ctrlr->locked = 0;
975 mutex_unlock(&ctrlr->mtx);
977 return 0;
979 EXPORT_SYMBOL(vme_dma_free);
981 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
983 void (*call)(int, int, void *);
984 void *priv_data;
986 call = bridge->irq[level - 1].callback[statid].func;
987 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
989 if (call != NULL)
990 call(level, statid, priv_data);
991 else
992 printk(KERN_WARNING "Spurilous VME interrupt, level:%x, "
993 "vector:%x\n", level, statid);
995 EXPORT_SYMBOL(vme_irq_handler);
997 int vme_irq_request(struct device *dev, int level, int statid,
998 void (*callback)(int, int, void *),
999 void *priv_data)
1001 struct vme_bridge *bridge;
1003 bridge = dev_to_bridge(dev);
1004 if (bridge == NULL) {
1005 printk(KERN_ERR "Can't find VME bus\n");
1006 return -EINVAL;
1009 if ((level < 1) || (level > 7)) {
1010 printk(KERN_ERR "Invalid interrupt level\n");
1011 return -EINVAL;
1014 if (bridge->irq_set == NULL) {
1015 printk(KERN_ERR "Configuring interrupts not supported\n");
1016 return -EINVAL;
1019 mutex_lock(&bridge->irq_mtx);
1021 if (bridge->irq[level - 1].callback[statid].func) {
1022 mutex_unlock(&bridge->irq_mtx);
1023 printk(KERN_WARNING "VME Interrupt already taken\n");
1024 return -EBUSY;
1027 bridge->irq[level - 1].count++;
1028 bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1029 bridge->irq[level - 1].callback[statid].func = callback;
1031 /* Enable IRQ level */
1032 bridge->irq_set(bridge, level, 1, 1);
1034 mutex_unlock(&bridge->irq_mtx);
1036 return 0;
1038 EXPORT_SYMBOL(vme_irq_request);
1040 void vme_irq_free(struct device *dev, int level, int statid)
1042 struct vme_bridge *bridge;
1044 bridge = dev_to_bridge(dev);
1045 if (bridge == NULL) {
1046 printk(KERN_ERR "Can't find VME bus\n");
1047 return;
1050 if ((level < 1) || (level > 7)) {
1051 printk(KERN_ERR "Invalid interrupt level\n");
1052 return;
1055 if (bridge->irq_set == NULL) {
1056 printk(KERN_ERR "Configuring interrupts not supported\n");
1057 return;
1060 mutex_lock(&bridge->irq_mtx);
1062 bridge->irq[level - 1].count--;
1064 /* Disable IRQ level if no more interrupts attached at this level*/
1065 if (bridge->irq[level - 1].count == 0)
1066 bridge->irq_set(bridge, level, 0, 1);
1068 bridge->irq[level - 1].callback[statid].func = NULL;
1069 bridge->irq[level - 1].callback[statid].priv_data = NULL;
1071 mutex_unlock(&bridge->irq_mtx);
1073 EXPORT_SYMBOL(vme_irq_free);
1075 int vme_irq_generate(struct device *dev, int level, int statid)
1077 struct vme_bridge *bridge;
1079 bridge = dev_to_bridge(dev);
1080 if (bridge == NULL) {
1081 printk(KERN_ERR "Can't find VME bus\n");
1082 return -EINVAL;
1085 if ((level < 1) || (level > 7)) {
1086 printk(KERN_WARNING "Invalid interrupt level\n");
1087 return -EINVAL;
1090 if (bridge->irq_generate == NULL) {
1091 printk(KERN_WARNING "Interrupt generation not supported\n");
1092 return -EINVAL;
1095 return bridge->irq_generate(bridge, level, statid);
1097 EXPORT_SYMBOL(vme_irq_generate);
1100 * Request the location monitor, return resource or NULL
1102 struct vme_resource *vme_lm_request(struct device *dev)
1104 struct vme_bridge *bridge;
1105 struct list_head *lm_pos = NULL;
1106 struct vme_lm_resource *allocated_lm = NULL;
1107 struct vme_lm_resource *lm = NULL;
1108 struct vme_resource *resource = NULL;
1110 bridge = dev_to_bridge(dev);
1111 if (bridge == NULL) {
1112 printk(KERN_ERR "Can't find VME bus\n");
1113 goto err_bus;
1116 /* Loop through DMA resources */
1117 list_for_each(lm_pos, &bridge->lm_resources) {
1118 lm = list_entry(lm_pos,
1119 struct vme_lm_resource, list);
1121 if (lm == NULL) {
1122 printk(KERN_ERR "Registered NULL Location Monitor "
1123 "resource\n");
1124 continue;
1127 /* Find an unlocked controller */
1128 mutex_lock(&lm->mtx);
1129 if (lm->locked == 0) {
1130 lm->locked = 1;
1131 mutex_unlock(&lm->mtx);
1132 allocated_lm = lm;
1133 break;
1135 mutex_unlock(&lm->mtx);
1138 /* Check to see if we found a resource */
1139 if (allocated_lm == NULL)
1140 goto err_lm;
1142 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1143 if (resource == NULL) {
1144 printk(KERN_ERR "Unable to allocate resource structure\n");
1145 goto err_alloc;
1147 resource->type = VME_LM;
1148 resource->entry = &allocated_lm->list;
1150 return resource;
1152 err_alloc:
1153 /* Unlock image */
1154 mutex_lock(&lm->mtx);
1155 lm->locked = 0;
1156 mutex_unlock(&lm->mtx);
1157 err_lm:
1158 err_bus:
1159 return NULL;
1161 EXPORT_SYMBOL(vme_lm_request);
1163 int vme_lm_count(struct vme_resource *resource)
1165 struct vme_lm_resource *lm;
1167 if (resource->type != VME_LM) {
1168 printk(KERN_ERR "Not a Location Monitor resource\n");
1169 return -EINVAL;
1172 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1174 return lm->monitors;
1176 EXPORT_SYMBOL(vme_lm_count);
1178 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1179 vme_address_t aspace, vme_cycle_t cycle)
1181 struct vme_bridge *bridge = find_bridge(resource);
1182 struct vme_lm_resource *lm;
1184 if (resource->type != VME_LM) {
1185 printk(KERN_ERR "Not a Location Monitor resource\n");
1186 return -EINVAL;
1189 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1191 if (bridge->lm_set == NULL) {
1192 printk(KERN_ERR "vme_lm_set not supported\n");
1193 return -EINVAL;
1196 return bridge->lm_set(lm, lm_base, aspace, cycle);
1198 EXPORT_SYMBOL(vme_lm_set);
1200 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1201 vme_address_t *aspace, vme_cycle_t *cycle)
1203 struct vme_bridge *bridge = find_bridge(resource);
1204 struct vme_lm_resource *lm;
1206 if (resource->type != VME_LM) {
1207 printk(KERN_ERR "Not a Location Monitor resource\n");
1208 return -EINVAL;
1211 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1213 if (bridge->lm_get == NULL) {
1214 printk(KERN_ERR "vme_lm_get not supported\n");
1215 return -EINVAL;
1218 return bridge->lm_get(lm, lm_base, aspace, cycle);
1220 EXPORT_SYMBOL(vme_lm_get);
1222 int vme_lm_attach(struct vme_resource *resource, int monitor,
1223 void (*callback)(int))
1225 struct vme_bridge *bridge = find_bridge(resource);
1226 struct vme_lm_resource *lm;
1228 if (resource->type != VME_LM) {
1229 printk(KERN_ERR "Not a Location Monitor resource\n");
1230 return -EINVAL;
1233 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1235 if (bridge->lm_attach == NULL) {
1236 printk(KERN_ERR "vme_lm_attach not supported\n");
1237 return -EINVAL;
1240 return bridge->lm_attach(lm, monitor, callback);
1242 EXPORT_SYMBOL(vme_lm_attach);
1244 int vme_lm_detach(struct vme_resource *resource, int monitor)
1246 struct vme_bridge *bridge = find_bridge(resource);
1247 struct vme_lm_resource *lm;
1249 if (resource->type != VME_LM) {
1250 printk(KERN_ERR "Not a Location Monitor resource\n");
1251 return -EINVAL;
1254 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1256 if (bridge->lm_detach == NULL) {
1257 printk(KERN_ERR "vme_lm_detach not supported\n");
1258 return -EINVAL;
1261 return bridge->lm_detach(lm, monitor);
1263 EXPORT_SYMBOL(vme_lm_detach);
1265 void vme_lm_free(struct vme_resource *resource)
1267 struct vme_lm_resource *lm;
1269 if (resource->type != VME_LM) {
1270 printk(KERN_ERR "Not a Location Monitor resource\n");
1271 return;
1274 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1276 mutex_lock(&lm->mtx);
1278 /* XXX
1279 * Check to see that there aren't any callbacks still attached, if
1280 * there are we should probably be detaching them!
1283 lm->locked = 0;
1285 mutex_unlock(&lm->mtx);
1287 kfree(resource);
1289 EXPORT_SYMBOL(vme_lm_free);
1291 int vme_slot_get(struct device *bus)
1293 struct vme_bridge *bridge;
1295 bridge = dev_to_bridge(bus);
1296 if (bridge == NULL) {
1297 printk(KERN_ERR "Can't find VME bus\n");
1298 return -EINVAL;
1301 if (bridge->slot_get == NULL) {
1302 printk(KERN_WARNING "vme_slot_get not supported\n");
1303 return -EINVAL;
1306 return bridge->slot_get(bridge);
1308 EXPORT_SYMBOL(vme_slot_get);
1311 /* - Bridge Registration --------------------------------------------------- */
1313 static int vme_add_bus(struct vme_bridge *bridge)
1315 int i;
1316 int ret = -1;
1318 mutex_lock(&vme_buses_lock);
1319 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1320 if ((vme_bus_numbers & (1 << i)) == 0) {
1321 vme_bus_numbers |= (1 << i);
1322 bridge->num = i;
1323 list_add_tail(&bridge->bus_list, &vme_bus_list);
1324 ret = 0;
1325 break;
1328 mutex_unlock(&vme_buses_lock);
1330 return ret;
1333 static void vme_remove_bus(struct vme_bridge *bridge)
1335 mutex_lock(&vme_buses_lock);
1336 vme_bus_numbers &= ~(1 << bridge->num);
1337 list_del(&bridge->bus_list);
1338 mutex_unlock(&vme_buses_lock);
1341 int vme_register_bridge(struct vme_bridge *bridge)
1343 struct device *dev;
1344 int retval;
1345 int i;
1347 retval = vme_add_bus(bridge);
1348 if (retval)
1349 return retval;
1351 /* This creates 32 vme "slot" devices. This equates to a slot for each
1352 * ID available in a system conforming to the ANSI/VITA 1-1994
1353 * specification.
1355 for (i = 0; i < VME_SLOTS_MAX; i++) {
1356 dev = &bridge->dev[i];
1357 memset(dev, 0, sizeof(struct device));
1359 dev->parent = bridge->parent;
1360 dev->bus = &vme_bus_type;
1362 * We save a pointer to the bridge in platform_data so that we
1363 * can get to it later. We keep driver_data for use by the
1364 * driver that binds against the slot
1366 dev->platform_data = bridge;
1367 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1369 retval = device_register(dev);
1370 if (retval)
1371 goto err_reg;
1374 return retval;
1376 err_reg:
1377 while (--i >= 0) {
1378 dev = &bridge->dev[i];
1379 device_unregister(dev);
1381 vme_remove_bus(bridge);
1382 return retval;
1384 EXPORT_SYMBOL(vme_register_bridge);
1386 void vme_unregister_bridge(struct vme_bridge *bridge)
1388 int i;
1389 struct device *dev;
1392 for (i = 0; i < VME_SLOTS_MAX; i++) {
1393 dev = &bridge->dev[i];
1394 device_unregister(dev);
1396 vme_remove_bus(bridge);
1398 EXPORT_SYMBOL(vme_unregister_bridge);
1401 /* - Driver Registration --------------------------------------------------- */
/**
 * vme_register_driver - register a driver with the VME bus core.
 * @drv: VME driver to register; its embedded device_driver has its
 *       name and bus filled in here before registration.
 *
 * Returns the result of driver_register(): 0 on success, negative
 * errno on failure.
 */
int vme_register_driver(struct vme_driver *drv)
{
	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(vme_register_driver);
/**
 * vme_unregister_driver - remove a driver registered with
 * vme_register_driver().
 * @drv: VME driver to unregister.
 */
void vme_unregister_driver(struct vme_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
1418 /* - Bus Registration ------------------------------------------------------ */
1420 static int vme_calc_slot(struct device *dev)
1422 struct vme_bridge *bridge;
1423 int num;
1425 bridge = dev_to_bridge(dev);
1427 /* Determine slot number */
1428 num = 0;
1429 while (num < VME_SLOTS_MAX) {
1430 if (&bridge->dev[num] == dev)
1431 break;
1433 num++;
1435 if (num == VME_SLOTS_MAX) {
1436 dev_err(dev, "Failed to identify slot\n");
1437 num = 0;
1438 goto err_dev;
1440 num++;
1442 err_dev:
1443 return num;
/*
 * Map a slot device back to the VME driver bound to it.
 *
 * NOTE(review): when dev->driver is NULL this only logs the condition
 * and still hands NULL to container_of(), yielding a bogus near-NULL
 * pointer for the caller -- presumably unreachable once the driver
 * core has bound a driver (probe/remove paths); confirm before
 * relying on it.
 */
static struct vme_driver *dev_to_vme_driver(struct device *dev)
{
	if (dev->driver == NULL)
		printk(KERN_ERR "Bugger dev->driver is NULL\n");

	return container_of(dev->driver, struct vme_driver, driver);
}
1454 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1456 struct vme_bridge *bridge;
1457 struct vme_driver *driver;
1458 int i, num;
1460 bridge = dev_to_bridge(dev);
1461 driver = container_of(drv, struct vme_driver, driver);
1463 num = vme_calc_slot(dev);
1464 if (!num)
1465 goto err_dev;
1467 if (driver->bind_table == NULL) {
1468 dev_err(dev, "Bind table NULL\n");
1469 goto err_table;
1472 i = 0;
1473 while ((driver->bind_table[i].bus != 0) ||
1474 (driver->bind_table[i].slot != 0)) {
1476 if (bridge->num == driver->bind_table[i].bus) {
1477 if (num == driver->bind_table[i].slot)
1478 return 1;
1480 if (driver->bind_table[i].slot == VME_SLOT_ALL)
1481 return 1;
1483 if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1484 (num == vme_slot_get(dev)))
1485 return 1;
1487 i++;
1490 err_dev:
1491 err_table:
1492 return 0;
1495 static int vme_bus_probe(struct device *dev)
1497 struct vme_bridge *bridge;
1498 struct vme_driver *driver;
1499 int retval = -ENODEV;
1501 driver = dev_to_vme_driver(dev);
1502 bridge = dev_to_bridge(dev);
1504 if (driver->probe != NULL)
1505 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1507 return retval;
1510 static int vme_bus_remove(struct device *dev)
1512 struct vme_bridge *bridge;
1513 struct vme_driver *driver;
1514 int retval = -ENODEV;
1516 driver = dev_to_vme_driver(dev);
1517 bridge = dev_to_bridge(dev);
1519 if (driver->remove != NULL)
1520 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1522 return retval;
/* The VME bus type: slot devices and VME drivers attach to this bus. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/* Module init: register the "vme" bus type with the driver core. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
/* Module exit: unregister the "vme" bus type. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
1543 MODULE_DESCRIPTION("VME bridge driver framework");
1544 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1545 MODULE_LICENSE("GPL");
1547 module_init(vme_init);
1548 module_exit(vme_exit);