// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vme.h>

#include "vme_bridge.h"

/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

static int __init vme_init(void);

static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}

/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
				  list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
				  list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
				  list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
				  list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}

/**
 * vme_alloc_consistent - Allocate contiguous memory.
 * @resource: Pointer to VME resource.
 * @size: Size of allocation required.
 * @dma: Pointer to variable to store physical address of allocation.
 *
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 *
 * Return: Virtual address of allocation on success, NULL on failure.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
			   dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (!resource) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (!bridge) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	if (!bridge->parent) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	if (!bridge->alloc_consistent) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/**
 * vme_free_consistent - Free previously allocated memory.
 * @resource: Pointer to VME resource.
 * @size: Size of allocation to free.
 * @vaddr: Virtual address of allocation.
 * @dma: Physical address of allocation.
 *
 * Free previously allocated block of contiguous memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
			 void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (!resource) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (!bridge) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	if (!bridge->parent) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	if (!bridge->free_consistent) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
		       bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
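
/*
 * Usage sketch (not part of the original file): allocating and releasing a
 * coherent buffer intended to back a slave window. The resource pointer and
 * the 64KiB size are hypothetical.
 */
static int example_alloc_slave_buffer(struct vme_resource *slave,
				      void **vaddr, dma_addr_t *dma)
{
	/* Buffer comes from the bridge device, suitable for slave_set() */
	*vaddr = vme_alloc_consistent(slave, 0x10000, dma);
	if (!*vaddr)
		return -ENOMEM;

	return 0;
}

static void example_free_slave_buffer(struct vme_resource *slave,
				      void *vaddr, dma_addr_t dma)
{
	vme_free_consistent(slave, 0x10000, vaddr, dma);
}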
/**
 * vme_get_size - Helper function returning size of a VME window
 * @resource: Pointer to VME slave or master resource.
 *
 * Determine the size of the VME window provided. This is a helper
 * function, wrapping the call to vme_master_get or vme_slave_get
 * depending on the type of window resource handed to it.
 *
 * Return: Size of the window on success, zero on failure.
 */
size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	u32 aspace, cycle, dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
					&aspace, &cycle, &dwidth);
		if (retval)
			return 0;

		return size;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
				       &buf_base, &aspace, &cycle);
		if (retval)
			return 0;

		return size;
	case VME_DMA:
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);

int vme_check_window(u32 aspace, unsigned long long vme_base,
		     unsigned long long size)
{
	int retval = 0;

	if (vme_base + size < size)
		return -EINVAL;

	switch (aspace) {
	case VME_A16:
		if (vme_base + size > VME_A16_MAX)
			retval = -EINVAL;
		break;
	case VME_A24:
		if (vme_base + size > VME_A24_MAX)
			retval = -EINVAL;
		break;
	case VME_A32:
		if (vme_base + size > VME_A32_MAX)
			retval = -EINVAL;
		break;
	case VME_A64:
		/* The VME_A64_MAX limit is actually U64_MAX + 1 */
		break;
	case VME_CRCSR:
		if (vme_base + size > VME_CRCSR_MAX)
			retval = -EINVAL;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		break;
	}

	return retval;
}
EXPORT_SYMBOL(vme_check_window);
static u32 vme_get_aspace(int am)
{
	switch (am) {
	case 0x29:
	case 0x2D:
		return VME_A16;
	case 0x38:
	case 0x39:
	case 0x3A:
	case 0x3B:
	case 0x3C:
	case 0x3D:
	case 0x3E:
	case 0x3F:
		return VME_A24;
	case 0x8:
	case 0x9:
	case 0xA:
	case 0xB:
	case 0xC:
	case 0xD:
	case 0xE:
	case 0xF:
		return VME_A32;
	case 0x0:
	case 0x1:
	case 0x3:
		return VME_A64;
	}

	return 0;
}
/**
 * vme_slave_request - Request a VME slave window resource.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @address: Required VME address space.
 * @cycle: Required VME data transfer cycle type.
 *
 * Request use of a VME window resource capable of being set for the requested
 * address space and data transfer cycle.
 *
 * Return: Pointer to VME resource on success, NULL on failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
				       u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
					 struct vme_slave_resource, list);

		if (!slave_image) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
		    ((slave_image->cycle_attr & cycle) == cycle) &&
		    (slave_image->locked == 0)) {
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (!allocated_image)
		goto err_image;

	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
/**
 * vme_slave_set - Set VME slave window configuration.
 * @resource: Pointer to VME slave resource.
 * @enabled: State to which the window should be configured.
 * @vme_base: Base address for the window.
 * @size: Size of the VME window.
 * @buf_base: Base address of buffer used to provide VME slave window storage.
 * @aspace: VME address space for the VME window.
 * @cycle: VME data transfer cycle type for the VME window.
 *
 * Set configuration for provided VME slave window.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device, if an invalid resource has been provided or invalid
 *         attributes are provided. Hardware specific errors may also be
 *         returned.
 */
int vme_slave_set(struct vme_resource *resource, int enabled,
		  unsigned long long vme_base, unsigned long long size,
		  dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (!bridge->slave_set) {
		printk(KERN_ERR "Function not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
	      ((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
				 aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);

/**
 * vme_slave_get - Retrieve VME slave window configuration.
 * @resource: Pointer to VME slave resource.
 * @enabled: Pointer to variable for storing state.
 * @vme_base: Pointer to variable for storing window base address.
 * @size: Pointer to variable for storing window size.
 * @buf_base: Pointer to variable for storing slave buffer base address.
 * @aspace: Pointer to variable for storing VME address space.
 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 *
 * Return configuration for provided VME slave window.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device or if an invalid resource has been provided.
 */
int vme_slave_get(struct vme_resource *resource, int *enabled,
		  unsigned long long *vme_base, unsigned long long *size,
		  dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (!bridge->slave_get) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
				 aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);

/**
 * vme_slave_free - Free VME slave window
 * @resource: Pointer to VME slave resource.
 *
 * Free the provided slave resource so that it may be reallocated.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
				 list);
	if (!slave_image) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
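
/*
 * Usage sketch (assumption, not part of the original file): request a slave
 * window, back it with a coherent buffer and enable it. The A24 base address
 * 0x400000 and the 64KiB size are hypothetical; error handling is minimal.
 */
static struct vme_resource *example_setup_slave(struct vme_dev *vdev,
						void **vaddr, dma_addr_t *dma)
{
	struct vme_resource *slave;

	slave = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (!slave)
		return NULL;

	*vaddr = vme_alloc_consistent(slave, 0x10000, dma);
	if (!*vaddr)
		goto err_buf;

	/* Respond to accesses at the hypothetical A24 address 0x400000 */
	if (vme_slave_set(slave, 1, 0x400000, 0x10000, *dma,
			  VME_A24, VME_SCT | VME_USER | VME_DATA))
		goto err_set;

	return slave;

err_set:
	vme_free_consistent(slave, 0x10000, *vaddr, *dma);
err_buf:
	vme_slave_free(slave);
	return NULL;
}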
/**
 * vme_master_request - Request a VME master window resource.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @address: Required VME address space.
 * @cycle: Required VME data transfer cycle type.
 * @dwidth: Required VME data transfer width.
 *
 * Request use of a VME window resource capable of being set for the requested
 * address space, data transfer cycle and width.
 *
 * Return: Pointer to VME resource on success, NULL on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
					u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
					  struct vme_master_resource, list);

		if (!master_image) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
		    ((master_image->cycle_attr & cycle) == cycle) &&
		    ((master_image->width_attr & dwidth) == dwidth) &&
		    (master_image->locked == 0)) {
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (!allocated_image) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
/**
 * vme_master_set - Set VME master window configuration.
 * @resource: Pointer to VME master resource.
 * @enabled: State to which the window should be configured.
 * @vme_base: Base address for the window.
 * @size: Size of the VME window.
 * @aspace: VME address space for the VME window.
 * @cycle: VME data transfer cycle type for the VME window.
 * @dwidth: VME data transfer width for the VME window.
 *
 * Set configuration for provided VME master window.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device, if an invalid resource has been provided or invalid
 *         attributes are provided. Hardware specific errors may also be
 *         returned.
 */
int vme_master_set(struct vme_resource *resource, int enabled,
		   unsigned long long vme_base, unsigned long long size,
		   u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (!bridge->master_set) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
	      ((image->cycle_attr & cycle) == cycle) &&
	      ((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
				  cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
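
/*
 * Usage sketch (assumption, not part of the original file): request a master
 * window and point it at a hypothetical 64KiB region of A24 space at
 * 0x200000 with D16 data width.
 */
static struct vme_resource *example_setup_master(struct vme_dev *vdev)
{
	struct vme_resource *master;

	master = vme_master_request(vdev, VME_A24,
				    VME_SCT | VME_USER | VME_DATA, VME_D16);
	if (!master)
		return NULL;

	if (vme_master_set(master, 1, 0x200000, 0x10000, VME_A24,
			   VME_SCT | VME_USER | VME_DATA, VME_D16)) {
		vme_master_free(master);
		return NULL;
	}

	return master;
}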
/**
 * vme_master_get - Retrieve VME master window configuration.
 * @resource: Pointer to VME master resource.
 * @enabled: Pointer to variable for storing state.
 * @vme_base: Pointer to variable for storing window base address.
 * @size: Pointer to variable for storing window size.
 * @aspace: Pointer to variable for storing VME address space.
 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 * @dwidth: Pointer to variable for storing VME data transfer width.
 *
 * Return configuration for provided VME master window.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device or if an invalid resource has been provided.
 */
int vme_master_get(struct vme_resource *resource, int *enabled,
		   unsigned long long *vme_base, unsigned long long *size,
		   u32 *aspace, u32 *cycle, u32 *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (!bridge->master_get) {
		printk(KERN_WARNING "%s not supported\n", __func__);
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
				  cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);
/**
 * vme_master_read - Read data from VME space into a buffer.
 * @resource: Pointer to VME master resource.
 * @buf: Pointer to buffer where data should be transferred.
 * @count: Number of bytes to transfer.
 * @offset: Offset into VME master window at which to start transfer.
 *
 * Perform read of count bytes of data from location on VME bus which maps into
 * the VME master window at offset to buf.
 *
 * Return: Number of bytes read, -EINVAL if resource is not a VME master
 *         resource or read operation is not supported. -EFAULT returned if
 *         invalid offset is provided. Hardware specific errors may also be
 *         returned.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
			loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (!bridge->master_read) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);

/**
 * vme_master_write - Write data out to VME space from a buffer.
 * @resource: Pointer to VME master resource.
 * @buf: Pointer to buffer holding data to transfer.
 * @count: Number of bytes to transfer.
 * @offset: Offset into VME master window at which to start transfer.
 *
 * Perform write of count bytes of data from buf to location on VME bus which
 * maps into the VME master window at offset.
 *
 * Return: Number of bytes written, -EINVAL if resource is not a VME master
 *         resource or write operation is not supported. -EFAULT returned if
 *         invalid offset is provided. Hardware specific errors may also be
 *         returned.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
			 size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (!bridge->master_write) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
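
/*
 * Usage sketch (assumption): read a 16-bit register at a hypothetical offset
 * within a configured master window, toggle its lowest bit and write it back.
 */
static int example_toggle_bit(struct vme_resource *master, loff_t offset)
{
	u16 reg;
	ssize_t ret;

	ret = vme_master_read(master, &reg, sizeof(reg), offset);
	if (ret < 0)
		return ret;

	reg ^= 0x1;

	ret = vme_master_write(master, &reg, sizeof(reg), offset);
	return ret < 0 ? ret : 0;
}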
/**
 * vme_master_rmw - Perform read-modify-write cycle.
 * @resource: Pointer to VME master resource.
 * @mask: Bits to be compared and swapped in operation.
 * @compare: Bits to be compared with data read from offset.
 * @swap: Bits to be swapped in data read from offset.
 * @offset: Offset into VME master window at which to perform operation.
 *
 * Perform read-modify-write cycle on provided location:
 * - Location on VME bus is read.
 * - Bits selected by mask are compared with compare.
 * - Where a selected bit matches that in compare and are selected in swap,
 *   the bit is swapped.
 * - Result written back to location on VME bus.
 *
 * Return: Bytes written on success, -EINVAL if resource is not a VME master
 *         resource or RMW operation is not supported. Hardware specific
 *         errors may also be returned.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
			    unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (!bridge->master_rmw) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
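
/*
 * Usage sketch (assumption): atomically claim a hypothetical semaphore word
 * shared over the bus. Only bit 0 takes part: it is compared against 0 and,
 * where it matches, swapped to 1 in a single bus RMW cycle.
 */
static void example_claim_semaphore(struct vme_resource *master, loff_t offset)
{
	vme_master_rmw(master, 0x1, 0x0, 0x1, offset);
}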
/**
 * vme_master_mmap - Mmap region of VME master window.
 * @resource: Pointer to VME master resource.
 * @vma: Pointer to definition of user mapping.
 *
 * Memory map a region of the VME master window into user space.
 *
 * Return: Zero on success, -EINVAL if resource is not a VME master
 *         resource or -EFAULT if map exceeds window size. Other generic mmap
 *         errors may also be returned.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);
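
/*
 * Usage sketch (assumption): exposing a configured master window to user
 * space from a hypothetical character device whose private_data holds the
 * master resource.
 */
static int example_fops_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vme_resource *master = file->private_data;

	return vme_master_mmap(master, vma);
}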
/**
 * vme_master_free - Free VME master window
 * @resource: Pointer to VME master resource.
 *
 * Free the provided master resource so that it may be reallocated.
 */
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
				  list);
	if (!master_image) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
/**
 * vme_dma_request - Request a DMA controller.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @route: Required src/destination combination.
 *
 * Request a VME DMA controller with capability to perform transfers between
 * requested source/destination combination.
 *
 * Return: Pointer to VME DMA resource on success, NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
				       struct vme_dma_resource, list);
		if (!dma_ctrlr) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
		    (dma_ctrlr->locked == 0)) {
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (!allocated_ctrlr)
		goto err_ctrlr;

	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock controller */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
/**
 * vme_new_dma_list - Create new VME DMA list.
 * @resource: Pointer to VME DMA resource.
 *
 * Create a new VME DMA list. It is the responsibility of the user to free
 * the list once it is no longer required with vme_dma_list_free().
 *
 * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
 *         VME DMA resource.
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = list_entry(resource->entry,
				      struct vme_dma_resource,
				      list);
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);
/**
 * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
 * @pattern: Value to use as the pattern.
 * @type: Type of pattern to be written.
 *
 * Create VME DMA list attribute for pattern generation. It is the
 * responsibility of the user to free used attributes using
 * vme_dma_free_attribute().
 *
 * Return: Pointer to VME DMA attribute, NULL on failure.
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
	if (!attributes)
		goto err_attr;

	pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
	if (!pattern_attr)
		goto err_pat;

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);

/**
 * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
 * @address: PCI base address for DMA transfer.
 *
 * Create VME DMA list attribute pointing to a location on PCI for DMA
 * transfers. It is the responsibility of the user to free used attributes
 * using vme_dma_free_attribute().
 *
 * Return: Pointer to VME DMA attribute, NULL on failure.
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pci *pci_attr;

	/* XXX Run some sanity checks here */

	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
	if (!attributes)
		goto err_attr;

	pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
	if (!pci_attr)
		goto err_pci;

	attributes->type = VME_DMA_PCI;
	attributes->private = (void *)pci_attr;

	pci_attr->address = address;

	return attributes;

err_pci:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);

/**
 * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
 * @address: VME base address for DMA transfer.
 * @aspace: VME address space to use for DMA transfer.
 * @cycle: VME bus cycle to use for DMA transfer.
 * @dwidth: VME data width to use for DMA transfer.
 *
 * Create VME DMA list attribute pointing to a location on the VME bus for DMA
 * transfers. It is the responsibility of the user to free used attributes
 * using vme_dma_free_attribute().
 *
 * Return: Pointer to VME DMA attribute, NULL on failure.
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
					   u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_vme *vme_attr;

	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
	if (!attributes)
		goto err_attr;

	vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
	if (!vme_attr)
		goto err_vme;

	attributes->type = VME_DMA_VME;
	attributes->private = (void *)vme_attr;

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	return attributes;

err_vme:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
/**
 * vme_dma_free_attribute - Free DMA list attribute.
 * @attributes: Pointer to DMA list attribute.
 *
 * Free VME DMA list attribute. VME DMA list attributes can be safely freed
 * once vme_dma_list_add() has returned.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
/**
 * vme_dma_list_add - Add entry to a VME DMA list.
 * @list: Pointer to VME list.
 * @src: Pointer to DMA list attribute to use as source.
 * @dest: Pointer to DMA list attribute to use as destination.
 * @count: Number of bytes to transfer.
 *
 * Add an entry to the provided VME DMA list. Entry requires pointers to source
 * and destination DMA attributes and a count.
 *
 * Please note, the attributes supported as source and destinations for
 * transfers are hardware dependent.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device or if the link list has already been submitted for execution.
 *         Hardware specific errors also possible.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
		     struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (!bridge->dma_list_add) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);

/**
 * vme_dma_list_exec - Queue a VME DMA list for execution.
 * @list: Pointer to VME list.
 *
 * Queue the provided VME DMA list for execution. The call will return once the
 * list has been executed.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device. Hardware specific errors also possible.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (!bridge->dma_list_exec) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
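
/*
 * Usage sketch (assumption): copy a DMA-mapped PCI buffer to a hypothetical
 * A32 address on the VME bus using a single-entry DMA list. The route,
 * address, cycle and width values are illustrative only.
 */
static int example_dma_to_vme(struct vme_dev *vdev, dma_addr_t buf, size_t len)
{
	struct vme_resource *dma_res;
	struct vme_dma_list *list = NULL;
	struct vme_dma_attr *src = NULL, *dest = NULL;
	int ret = -ENOMEM;

	dma_res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!dma_res)
		return -EBUSY;

	list = vme_new_dma_list(dma_res);
	src = vme_dma_pci_attribute(buf);
	dest = vme_dma_vme_attribute(0x1000000, VME_A32,
				     VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (!list || !src || !dest)
		goto out;

	ret = vme_dma_list_add(list, src, dest, len);
	if (!ret)
		ret = vme_dma_list_exec(list);

out:
	if (src)
		vme_dma_free_attribute(src);
	if (dest)
		vme_dma_free_attribute(dest);
	if (list)
		vme_dma_list_free(list);
	vme_dma_free(dma_res);
	return ret;
}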
/**
 * vme_dma_list_free - Free a VME DMA list.
 * @list: Pointer to VME list.
 *
 * Free the provided DMA list and all its entries.
 *
 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
 *         is still in use. Hardware specific errors also possible.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (!bridge->dma_list_empty) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EBUSY;
	}

	/*
	 * Empty out all of the entries from the DMA list. We need to go to the
	 * low level driver as DMA entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);

/**
 * vme_dma_free - Free a VME DMA resource.
 * @resource: Pointer to VME DMA resource.
 *
 * Free the provided DMA resource so that it may be reallocated.
 *
 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
 *         is still active.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
void vme_bus_error_handler(struct vme_bridge *bridge,
			   unsigned long long address, int am)
{
	struct list_head *handler_pos = NULL;
	struct vme_error_handler *handler;
	int handler_triggered = 0;
	u32 aspace = vme_get_aspace(am);

	list_for_each(handler_pos, &bridge->vme_error_handlers) {
		handler = list_entry(handler_pos, struct vme_error_handler,
				     list);
		if ((aspace == handler->aspace) &&
		    (address >= handler->start) &&
		    (address < handler->end)) {
			if (!handler->num_errors)
				handler->first_error = address;
			if (handler->num_errors != UINT_MAX)
				handler->num_errors++;
			handler_triggered = 1;
		}
	}

	if (!handler_triggered)
		dev_err(bridge->parent,
			"Unhandled VME access error at address 0x%llx\n",
			address);
}
EXPORT_SYMBOL(vme_bus_error_handler);

struct vme_error_handler *vme_register_error_handler(
	struct vme_bridge *bridge, u32 aspace,
	unsigned long long address, size_t len)
{
	struct vme_error_handler *handler;

	handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
	if (!handler)
		return NULL;

	handler->aspace = aspace;
	handler->start = address;
	handler->end = address + len;
	handler->num_errors = 0;
	handler->first_error = 0;
	list_add_tail(&handler->list, &bridge->vme_error_handlers);

	return handler;
}
EXPORT_SYMBOL(vme_register_error_handler);

void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*call)(int, int, void *);
	void *priv_data;

	call = bridge->irq[level - 1].callback[statid].func;
	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
	if (call)
		call(level, statid, priv_data);
	else
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
		       level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);
/**
 * vme_irq_request - Request a specific VME interrupt.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @level: Interrupt priority being requested.
 * @statid: Interrupt vector being requested.
 * @callback: Pointer to callback function called when VME interrupt/vector
 *            received.
 * @priv_data: Generic pointer that will be passed to the callback function.
 *
 * Request callback to be attached as a handler for VME interrupts with provided
 * level and statid.
 *
 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
 *         function is not supported, -EBUSY if the level/statid combination is
 *         already in use. Hardware specific errors also possible.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
		    void (*callback)(int, int, void *),
		    void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (!bridge->irq_set) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
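
/*
 * Usage sketch (assumption): attach a handler for VME level 3, vector 0x20
 * and release it again. "struct example_priv" is hypothetical driver state;
 * the callback receives the level and status/ID that triggered it plus the
 * private pointer passed at request time.
 */
struct example_priv {
	struct vme_dev *vdev;
};

static void example_vme_isr(int level, int statid, void *priv)
{
	struct example_priv *p = priv;

	dev_info(&p->vdev->dev, "VME IRQ level %d, vector 0x%x\n",
		 level, statid);
}

static int example_setup_irq(struct vme_dev *vdev, struct example_priv *p)
{
	return vme_irq_request(vdev, 3, 0x20, example_vme_isr, p);
}

static void example_teardown_irq(struct vme_dev *vdev)
{
	vme_irq_free(vdev, 3, 0x20);
}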
/**
 * vme_irq_free - Free a VME interrupt.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @level: Interrupt priority of interrupt being freed.
 * @statid: Interrupt vector of interrupt being freed.
 *
 * Remove previously attached callback from VME interrupt priority/vector.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (!bridge->irq_set) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);

/**
 * vme_irq_generate - Generate VME interrupt.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @level: Interrupt priority at which to assert the interrupt.
 * @statid: Interrupt vector to associate with the interrupt.
 *
 * Generate a VME interrupt of the provided level and with the provided
 * statid.
 *
 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
 *         function is not supported. Hardware specific errors also possible.
 */
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (!bridge->irq_generate) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);
/**
 * vme_lm_request - Request a VME location monitor
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Allocate a location monitor resource to the driver. A location monitor
 * allows the driver to monitor accesses to a contiguous number of
 * addresses on the VME bus.
 *
 * Return: Pointer to a VME resource on success or NULL on failure.
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through LM resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
				struct vme_lm_resource, list);
		if (!lm) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (!allocated_lm)
		goto err_lm;

	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
/**
 * vme_lm_count - Determine number of VME Addresses monitored
 * @resource: Pointer to VME location monitor resource.
 *
 * The number of contiguous addresses monitored is hardware dependent.
 * Return the number of contiguous addresses monitored by the
 * location monitor.
 *
 * Return: Count of addresses monitored or -EINVAL when provided with an
 *         invalid location monitor resource.
 */
int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);

/**
 * vme_lm_set - Configure location monitor
 * @resource: Pointer to VME location monitor resource.
 * @lm_base: Base address to monitor.
 * @aspace: VME address space to monitor.
 * @cycle: VME bus cycle type to monitor.
 *
 * Set the base address, address space and cycle type of accesses to be
 * monitored by the location monitor.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	       u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_set) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
/**
 * vme_lm_get - Retrieve location monitor settings
 * @resource: Pointer to VME location monitor resource.
 * @lm_base: Pointer used to output the base address monitored.
 * @aspace: Pointer used to output the address space monitored.
 * @cycle: Pointer used to output the VME bus cycle type monitored.
 *
 * Retrieve the base address, address space and cycle type of accesses to
 * be monitored by the location monitor.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	       u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_get) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);

/**
 * vme_lm_attach - Provide callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset to which callback should be attached.
 * @callback: Pointer to callback function called when triggered.
 * @data: Generic pointer that will be passed to the callback function.
 *
 * Attach a callback to the specified offset into the location monitor's
 * monitored addresses. A generic pointer is provided to allow data to be
 * passed to the callback when called.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_attach(struct vme_resource *resource, int monitor,
		  void (*callback)(void *), void *data)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_attach) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback, data);
}
EXPORT_SYMBOL(vme_lm_attach);
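
/*
 * Usage sketch (assumption): request a location monitor, point it at a
 * hypothetical A24 address and attach a callback to the first monitored
 * slot. The address and cycle attributes are illustrative only.
 */
static void example_lm_callback(void *data)
{
	pr_info("Location monitor hit (%p)\n", data);
}

static struct vme_resource *example_setup_lm(struct vme_dev *vdev)
{
	struct vme_resource *lm;

	lm = vme_lm_request(vdev);
	if (!lm)
		return NULL;

	if (vme_lm_set(lm, 0x600000, VME_A24, VME_SCT | VME_USER | VME_DATA) ||
	    vme_lm_attach(lm, 0, example_lm_callback, NULL)) {
		vme_lm_free(lm);
		return NULL;
	}

	return lm;
}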
/**
 * vme_lm_detach - Remove callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset from which the callback should be removed.
 *
 * Remove the callback associated with the specified offset into the
 * location monitor's monitored addresses.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *         monitor resource or function is not supported. Hardware specific
 *         errors may also be returned.
 */
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_detach) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);

/**
 * vme_lm_free - Free allocated VME location monitor
 * @resource: Pointer to VME location monitor resource.
 *
 * Free allocation of a VME location monitor.
 *
 * WARNING: This function currently expects that any callbacks that have
 *          been attached to the location monitor have been removed.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/*
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
/**
 * vme_slot_num - Retrieve slot ID
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Retrieve the slot ID associated with the provided VME device.
 *
 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
 *         or the function is not supported. Hardware specific errors may also
 *         be returned.
 */
int vme_slot_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (!bridge->slot_get) {
		printk(KERN_WARNING "vme_slot_num not supported\n");
		return -EINVAL;
	}

	return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_num);

/**
 * vme_bus_num - Retrieve bus number
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 *
 * Retrieve the bus enumeration associated with the provided VME device.
 *
 * Return: The bus number on success, -EINVAL if VME bridge cannot be
 *         determined.
 */
int vme_bus_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (!bridge) {
		pr_err("Can't find VME bus\n");
		return -EINVAL;
	}

	return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);
/* - Bridge Registration --------------------------------------------------- */

static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}

/* Common bridge initialization */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->vme_error_handlers);
	INIT_LIST_HEAD(&bridge->master_resources);
	INIT_LIST_HEAD(&bridge->slave_resources);
	INIT_LIST_HEAD(&bridge->dma_resources);
	INIT_LIST_HEAD(&bridge->lm_resources);
	mutex_init(&bridge->irq_mtx);

	return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);

int vme_register_bridge(struct vme_bridge *bridge)
{
	int i;
	int ret = -1;

	mutex_lock(&vme_buses_lock);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
			INIT_LIST_HEAD(&bridge->devices);
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&vme_buses_lock);

	return ret;
}
EXPORT_SYMBOL(vme_register_bridge);

void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */

static int __vme_register_driver_bus(struct vme_driver *drv,
				     struct vme_bridge *bridge,
				     unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			     vdev->num);

		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			device_unregister(&vdev->dev);
	}
	return 0;

err_reg:
	put_device(&vdev->dev);
err_devalloc:
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}

static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);

	return err;
}

/**
 * vme_register_driver - Register a VME driver
 * @drv: Pointer to VME driver structure to register.
 * @ndevs: Maximum number of devices to allow to be enumerated.
 *
 * Register a VME device driver with the VME subsystem.
 *
 * Return: Zero on success, error value on registration failure.
 */
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	int err;

	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;
	INIT_LIST_HEAD(&drv->devices);

	err = driver_register(&drv->driver);
	if (err)
		return err;

	err = __vme_register_driver(drv, ndevs);
	if (err)
		driver_unregister(&drv->driver);

	return err;
}
EXPORT_SYMBOL(vme_register_driver);
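
/*
 * Usage sketch (assumption): a minimal driver registering with the VME
 * subsystem. Names and the enumerated device count are hypothetical; match()
 * decides which of the ndevs enumerated devices this driver claims.
 */
static int example_match(struct vme_dev *vdev)
{
	return vdev->num == 0;	/* claim only the first enumerated device */
}

static int example_probe(struct vme_dev *vdev)
{
	dev_info(&vdev->dev, "example VME device bound\n");
	return 0;
}

static int example_remove(struct vme_dev *vdev)
{
	return 0;
}

static struct vme_driver example_driver = {
	.name = "vme_example",
	.match = example_match,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return vme_register_driver(&example_driver, 1);
}

static void __exit example_exit(void)
{
	vme_unregister_driver(&example_driver);
}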
/**
 * vme_unregister_driver - Unregister a VME driver
 * @drv: Pointer to VME driver structure to unregister.
 *
 * Unregister a VME device driver from the VME subsystem.
 */
void vme_unregister_driver(struct vme_driver *drv)
{
	struct vme_dev *dev, *dev_tmp;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
		list_del(&dev->drv_list);
		list_del(&dev->bridge_list);
		device_unregister(&dev->dev);
	}
	mutex_unlock(&vme_buses_lock);

	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);

/* - Bus Registration ------------------------------------------------------ */

static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}

static int vme_bus_probe(struct device *dev)
{
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;
	if (driver->probe)
		return driver->probe(vdev);

	return -ENODEV;
}

static int vme_bus_remove(struct device *dev)
{
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;
	if (driver->remove)
		return driver->remove(vdev);

	return -ENODEV;
}

struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);

static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);