// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 */
14 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/errno.h>
18 #include <linux/pci.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/poll.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
27 #include <linux/uaccess.h>
28 #include <linux/vme.h>
30 #include "../vme_bridge.h"
31 #include "vme_ca91cx42.h"
33 static int ca91cx42_probe(struct pci_dev
*, const struct pci_device_id
*);
34 static void ca91cx42_remove(struct pci_dev
*);
36 /* Module parameters */
39 static const char driver_name
[] = "vme_ca91cx42";
41 static const struct pci_device_id ca91cx42_ids
[] = {
42 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA
, PCI_DEVICE_ID_TUNDRA_CA91C142
) },
46 MODULE_DEVICE_TABLE(pci
, ca91cx42_ids
);
48 static struct pci_driver ca91cx42_driver
= {
50 .id_table
= ca91cx42_ids
,
51 .probe
= ca91cx42_probe
,
52 .remove
= ca91cx42_remove
,
55 static u32
ca91cx42_DMA_irqhandler(struct ca91cx42_driver
*bridge
)
57 wake_up(&bridge
->dma_queue
);
59 return CA91CX42_LINT_DMA
;
62 static u32
ca91cx42_LM_irqhandler(struct ca91cx42_driver
*bridge
, u32 stat
)
67 for (i
= 0; i
< 4; i
++) {
68 if (stat
& CA91CX42_LINT_LM
[i
]) {
69 /* We only enable interrupts if the callback is set */
70 bridge
->lm_callback
[i
](bridge
->lm_data
[i
]);
71 serviced
|= CA91CX42_LINT_LM
[i
];
78 /* XXX This needs to be split into 4 queues */
79 static u32
ca91cx42_MB_irqhandler(struct ca91cx42_driver
*bridge
, int mbox_mask
)
81 wake_up(&bridge
->mbox_queue
);
83 return CA91CX42_LINT_MBOX
;
86 static u32
ca91cx42_IACK_irqhandler(struct ca91cx42_driver
*bridge
)
88 wake_up(&bridge
->iack_queue
);
90 return CA91CX42_LINT_SW_IACK
;
93 static u32
ca91cx42_VERR_irqhandler(struct vme_bridge
*ca91cx42_bridge
)
96 struct ca91cx42_driver
*bridge
;
98 bridge
= ca91cx42_bridge
->driver_priv
;
100 val
= ioread32(bridge
->base
+ DGCS
);
102 if (!(val
& 0x00000800)) {
103 dev_err(ca91cx42_bridge
->parent
, "ca91cx42_VERR_irqhandler DMA "
104 "Read Error DGCS=%08X\n", val
);
107 return CA91CX42_LINT_VERR
;
110 static u32
ca91cx42_LERR_irqhandler(struct vme_bridge
*ca91cx42_bridge
)
113 struct ca91cx42_driver
*bridge
;
115 bridge
= ca91cx42_bridge
->driver_priv
;
117 val
= ioread32(bridge
->base
+ DGCS
);
119 if (!(val
& 0x00000800))
120 dev_err(ca91cx42_bridge
->parent
, "ca91cx42_LERR_irqhandler DMA "
121 "Read Error DGCS=%08X\n", val
);
123 return CA91CX42_LINT_LERR
;
127 static u32
ca91cx42_VIRQ_irqhandler(struct vme_bridge
*ca91cx42_bridge
,
130 int vec
, i
, serviced
= 0;
131 struct ca91cx42_driver
*bridge
;
133 bridge
= ca91cx42_bridge
->driver_priv
;
136 for (i
= 7; i
> 0; i
--) {
137 if (stat
& (1 << i
)) {
138 vec
= ioread32(bridge
->base
+
139 CA91CX42_V_STATID
[i
]) & 0xff;
141 vme_irq_handler(ca91cx42_bridge
, i
, vec
);
143 serviced
|= (1 << i
);
150 static irqreturn_t
ca91cx42_irqhandler(int irq
, void *ptr
)
152 u32 stat
, enable
, serviced
= 0;
153 struct vme_bridge
*ca91cx42_bridge
;
154 struct ca91cx42_driver
*bridge
;
156 ca91cx42_bridge
= ptr
;
158 bridge
= ca91cx42_bridge
->driver_priv
;
160 enable
= ioread32(bridge
->base
+ LINT_EN
);
161 stat
= ioread32(bridge
->base
+ LINT_STAT
);
163 /* Only look at unmasked interrupts */
169 if (stat
& CA91CX42_LINT_DMA
)
170 serviced
|= ca91cx42_DMA_irqhandler(bridge
);
171 if (stat
& (CA91CX42_LINT_LM0
| CA91CX42_LINT_LM1
| CA91CX42_LINT_LM2
|
173 serviced
|= ca91cx42_LM_irqhandler(bridge
, stat
);
174 if (stat
& CA91CX42_LINT_MBOX
)
175 serviced
|= ca91cx42_MB_irqhandler(bridge
, stat
);
176 if (stat
& CA91CX42_LINT_SW_IACK
)
177 serviced
|= ca91cx42_IACK_irqhandler(bridge
);
178 if (stat
& CA91CX42_LINT_VERR
)
179 serviced
|= ca91cx42_VERR_irqhandler(ca91cx42_bridge
);
180 if (stat
& CA91CX42_LINT_LERR
)
181 serviced
|= ca91cx42_LERR_irqhandler(ca91cx42_bridge
);
182 if (stat
& (CA91CX42_LINT_VIRQ1
| CA91CX42_LINT_VIRQ2
|
183 CA91CX42_LINT_VIRQ3
| CA91CX42_LINT_VIRQ4
|
184 CA91CX42_LINT_VIRQ5
| CA91CX42_LINT_VIRQ6
|
185 CA91CX42_LINT_VIRQ7
))
186 serviced
|= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge
, stat
);
188 /* Clear serviced interrupts */
189 iowrite32(serviced
, bridge
->base
+ LINT_STAT
);
194 static int ca91cx42_irq_init(struct vme_bridge
*ca91cx42_bridge
)
197 struct pci_dev
*pdev
;
198 struct ca91cx42_driver
*bridge
;
200 bridge
= ca91cx42_bridge
->driver_priv
;
203 pdev
= to_pci_dev(ca91cx42_bridge
->parent
);
205 /* Disable interrupts from PCI to VME */
206 iowrite32(0, bridge
->base
+ VINT_EN
);
208 /* Disable PCI interrupts */
209 iowrite32(0, bridge
->base
+ LINT_EN
);
210 /* Clear Any Pending PCI Interrupts */
211 iowrite32(0x00FFFFFF, bridge
->base
+ LINT_STAT
);
213 result
= request_irq(pdev
->irq
, ca91cx42_irqhandler
, IRQF_SHARED
,
214 driver_name
, ca91cx42_bridge
);
216 dev_err(&pdev
->dev
, "Can't get assigned pci irq vector %02X\n",
221 /* Ensure all interrupts are mapped to PCI Interrupt 0 */
222 iowrite32(0, bridge
->base
+ LINT_MAP0
);
223 iowrite32(0, bridge
->base
+ LINT_MAP1
);
224 iowrite32(0, bridge
->base
+ LINT_MAP2
);
226 /* Enable DMA, mailbox & LM Interrupts */
227 tmp
= CA91CX42_LINT_MBOX3
| CA91CX42_LINT_MBOX2
| CA91CX42_LINT_MBOX1
|
228 CA91CX42_LINT_MBOX0
| CA91CX42_LINT_SW_IACK
|
229 CA91CX42_LINT_VERR
| CA91CX42_LINT_LERR
| CA91CX42_LINT_DMA
;
231 iowrite32(tmp
, bridge
->base
+ LINT_EN
);
236 static void ca91cx42_irq_exit(struct ca91cx42_driver
*bridge
,
237 struct pci_dev
*pdev
)
239 struct vme_bridge
*ca91cx42_bridge
;
241 /* Disable interrupts from PCI to VME */
242 iowrite32(0, bridge
->base
+ VINT_EN
);
244 /* Disable PCI interrupts */
245 iowrite32(0, bridge
->base
+ LINT_EN
);
246 /* Clear Any Pending PCI Interrupts */
247 iowrite32(0x00FFFFFF, bridge
->base
+ LINT_STAT
);
249 ca91cx42_bridge
= container_of((void *)bridge
, struct vme_bridge
,
251 free_irq(pdev
->irq
, ca91cx42_bridge
);
254 static int ca91cx42_iack_received(struct ca91cx42_driver
*bridge
, int level
)
258 tmp
= ioread32(bridge
->base
+ LINT_STAT
);
260 if (tmp
& (1 << level
))
267 * Set up an VME interrupt
269 static void ca91cx42_irq_set(struct vme_bridge
*ca91cx42_bridge
, int level
,
273 struct pci_dev
*pdev
;
275 struct ca91cx42_driver
*bridge
;
277 bridge
= ca91cx42_bridge
->driver_priv
;
279 /* Enable IRQ level */
280 tmp
= ioread32(bridge
->base
+ LINT_EN
);
283 tmp
&= ~CA91CX42_LINT_VIRQ
[level
];
285 tmp
|= CA91CX42_LINT_VIRQ
[level
];
287 iowrite32(tmp
, bridge
->base
+ LINT_EN
);
289 if ((state
== 0) && (sync
!= 0)) {
290 pdev
= to_pci_dev(ca91cx42_bridge
->parent
);
292 synchronize_irq(pdev
->irq
);
296 static int ca91cx42_irq_generate(struct vme_bridge
*ca91cx42_bridge
, int level
,
300 struct ca91cx42_driver
*bridge
;
302 bridge
= ca91cx42_bridge
->driver_priv
;
304 /* Universe can only generate even vectors */
308 mutex_lock(&bridge
->vme_int
);
310 tmp
= ioread32(bridge
->base
+ VINT_EN
);
313 iowrite32(statid
<< 24, bridge
->base
+ STATID
);
315 /* Assert VMEbus IRQ */
316 tmp
= tmp
| (1 << (level
+ 24));
317 iowrite32(tmp
, bridge
->base
+ VINT_EN
);
320 wait_event_interruptible(bridge
->iack_queue
,
321 ca91cx42_iack_received(bridge
, level
));
323 /* Return interrupt to low state */
324 tmp
= ioread32(bridge
->base
+ VINT_EN
);
325 tmp
= tmp
& ~(1 << (level
+ 24));
326 iowrite32(tmp
, bridge
->base
+ VINT_EN
);
328 mutex_unlock(&bridge
->vme_int
);
333 static int ca91cx42_slave_set(struct vme_slave_resource
*image
, int enabled
,
334 unsigned long long vme_base
, unsigned long long size
,
335 dma_addr_t pci_base
, u32 aspace
, u32 cycle
)
337 unsigned int i
, addr
= 0, granularity
;
338 unsigned int temp_ctl
= 0;
339 unsigned int vme_bound
, pci_offset
;
340 struct vme_bridge
*ca91cx42_bridge
;
341 struct ca91cx42_driver
*bridge
;
343 ca91cx42_bridge
= image
->parent
;
345 bridge
= ca91cx42_bridge
->driver_priv
;
351 addr
|= CA91CX42_VSI_CTL_VAS_A16
;
354 addr
|= CA91CX42_VSI_CTL_VAS_A24
;
357 addr
|= CA91CX42_VSI_CTL_VAS_A32
;
360 addr
|= CA91CX42_VSI_CTL_VAS_USER1
;
363 addr
|= CA91CX42_VSI_CTL_VAS_USER2
;
370 dev_err(ca91cx42_bridge
->parent
, "Invalid address space\n");
376 * Bound address is a valid address for the window, adjust
379 vme_bound
= vme_base
+ size
;
380 pci_offset
= pci_base
- vme_base
;
382 if ((i
== 0) || (i
== 4))
383 granularity
= 0x1000;
385 granularity
= 0x10000;
387 if (vme_base
& (granularity
- 1)) {
388 dev_err(ca91cx42_bridge
->parent
, "Invalid VME base "
392 if (vme_bound
& (granularity
- 1)) {
393 dev_err(ca91cx42_bridge
->parent
, "Invalid VME bound "
397 if (pci_offset
& (granularity
- 1)) {
398 dev_err(ca91cx42_bridge
->parent
, "Invalid PCI Offset "
403 /* Disable while we are mucking around */
404 temp_ctl
= ioread32(bridge
->base
+ CA91CX42_VSI_CTL
[i
]);
405 temp_ctl
&= ~CA91CX42_VSI_CTL_EN
;
406 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_VSI_CTL
[i
]);
409 iowrite32(vme_base
, bridge
->base
+ CA91CX42_VSI_BS
[i
]);
410 iowrite32(vme_bound
, bridge
->base
+ CA91CX42_VSI_BD
[i
]);
411 iowrite32(pci_offset
, bridge
->base
+ CA91CX42_VSI_TO
[i
]);
413 /* Setup address space */
414 temp_ctl
&= ~CA91CX42_VSI_CTL_VAS_M
;
417 /* Setup cycle types */
418 temp_ctl
&= ~(CA91CX42_VSI_CTL_PGM_M
| CA91CX42_VSI_CTL_SUPER_M
);
419 if (cycle
& VME_SUPER
)
420 temp_ctl
|= CA91CX42_VSI_CTL_SUPER_SUPR
;
421 if (cycle
& VME_USER
)
422 temp_ctl
|= CA91CX42_VSI_CTL_SUPER_NPRIV
;
423 if (cycle
& VME_PROG
)
424 temp_ctl
|= CA91CX42_VSI_CTL_PGM_PGM
;
425 if (cycle
& VME_DATA
)
426 temp_ctl
|= CA91CX42_VSI_CTL_PGM_DATA
;
428 /* Write ctl reg without enable */
429 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_VSI_CTL
[i
]);
432 temp_ctl
|= CA91CX42_VSI_CTL_EN
;
434 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_VSI_CTL
[i
]);
439 static int ca91cx42_slave_get(struct vme_slave_resource
*image
, int *enabled
,
440 unsigned long long *vme_base
, unsigned long long *size
,
441 dma_addr_t
*pci_base
, u32
*aspace
, u32
*cycle
)
443 unsigned int i
, granularity
= 0, ctl
= 0;
444 unsigned long long vme_bound
, pci_offset
;
445 struct ca91cx42_driver
*bridge
;
447 bridge
= image
->parent
->driver_priv
;
451 if ((i
== 0) || (i
== 4))
452 granularity
= 0x1000;
454 granularity
= 0x10000;
457 ctl
= ioread32(bridge
->base
+ CA91CX42_VSI_CTL
[i
]);
459 *vme_base
= ioread32(bridge
->base
+ CA91CX42_VSI_BS
[i
]);
460 vme_bound
= ioread32(bridge
->base
+ CA91CX42_VSI_BD
[i
]);
461 pci_offset
= ioread32(bridge
->base
+ CA91CX42_VSI_TO
[i
]);
463 *pci_base
= (dma_addr_t
)*vme_base
+ pci_offset
;
464 *size
= (unsigned long long)((vme_bound
- *vme_base
) + granularity
);
470 if (ctl
& CA91CX42_VSI_CTL_EN
)
473 if ((ctl
& CA91CX42_VSI_CTL_VAS_M
) == CA91CX42_VSI_CTL_VAS_A16
)
475 if ((ctl
& CA91CX42_VSI_CTL_VAS_M
) == CA91CX42_VSI_CTL_VAS_A24
)
477 if ((ctl
& CA91CX42_VSI_CTL_VAS_M
) == CA91CX42_VSI_CTL_VAS_A32
)
479 if ((ctl
& CA91CX42_VSI_CTL_VAS_M
) == CA91CX42_VSI_CTL_VAS_USER1
)
481 if ((ctl
& CA91CX42_VSI_CTL_VAS_M
) == CA91CX42_VSI_CTL_VAS_USER2
)
484 if (ctl
& CA91CX42_VSI_CTL_SUPER_SUPR
)
486 if (ctl
& CA91CX42_VSI_CTL_SUPER_NPRIV
)
488 if (ctl
& CA91CX42_VSI_CTL_PGM_PGM
)
490 if (ctl
& CA91CX42_VSI_CTL_PGM_DATA
)
497 * Allocate and map PCI Resource
499 static int ca91cx42_alloc_resource(struct vme_master_resource
*image
,
500 unsigned long long size
)
502 unsigned long long existing_size
;
504 struct pci_dev
*pdev
;
505 struct vme_bridge
*ca91cx42_bridge
;
507 ca91cx42_bridge
= image
->parent
;
509 /* Find pci_dev container of dev */
510 if (!ca91cx42_bridge
->parent
) {
511 dev_err(ca91cx42_bridge
->parent
, "Dev entry NULL\n");
514 pdev
= to_pci_dev(ca91cx42_bridge
->parent
);
516 existing_size
= (unsigned long long)(image
->bus_resource
.end
-
517 image
->bus_resource
.start
);
519 /* If the existing size is OK, return */
520 if (existing_size
== (size
- 1))
523 if (existing_size
!= 0) {
524 iounmap(image
->kern_base
);
525 image
->kern_base
= NULL
;
526 kfree(image
->bus_resource
.name
);
527 release_resource(&image
->bus_resource
);
528 memset(&image
->bus_resource
, 0, sizeof(image
->bus_resource
));
531 if (!image
->bus_resource
.name
) {
532 image
->bus_resource
.name
= kmalloc(VMENAMSIZ
+3, GFP_ATOMIC
);
533 if (!image
->bus_resource
.name
) {
539 sprintf((char *)image
->bus_resource
.name
, "%s.%d",
540 ca91cx42_bridge
->name
, image
->number
);
542 image
->bus_resource
.start
= 0;
543 image
->bus_resource
.end
= (unsigned long)size
;
544 image
->bus_resource
.flags
= IORESOURCE_MEM
;
546 retval
= pci_bus_alloc_resource(pdev
->bus
,
547 &image
->bus_resource
, size
, 0x10000, PCIBIOS_MIN_MEM
,
550 dev_err(ca91cx42_bridge
->parent
, "Failed to allocate mem "
551 "resource for window %d size 0x%lx start 0x%lx\n",
552 image
->number
, (unsigned long)size
,
553 (unsigned long)image
->bus_resource
.start
);
557 image
->kern_base
= ioremap(
558 image
->bus_resource
.start
, size
);
559 if (!image
->kern_base
) {
560 dev_err(ca91cx42_bridge
->parent
, "Failed to remap resource\n");
568 release_resource(&image
->bus_resource
);
570 kfree(image
->bus_resource
.name
);
571 memset(&image
->bus_resource
, 0, sizeof(image
->bus_resource
));
577 * Free and unmap PCI Resource
579 static void ca91cx42_free_resource(struct vme_master_resource
*image
)
581 iounmap(image
->kern_base
);
582 image
->kern_base
= NULL
;
583 release_resource(&image
->bus_resource
);
584 kfree(image
->bus_resource
.name
);
585 memset(&image
->bus_resource
, 0, sizeof(image
->bus_resource
));
589 static int ca91cx42_master_set(struct vme_master_resource
*image
, int enabled
,
590 unsigned long long vme_base
, unsigned long long size
, u32 aspace
,
591 u32 cycle
, u32 dwidth
)
594 unsigned int i
, granularity
= 0;
595 unsigned int temp_ctl
= 0;
596 unsigned long long pci_bound
, vme_offset
, pci_base
;
597 struct vme_bridge
*ca91cx42_bridge
;
598 struct ca91cx42_driver
*bridge
;
600 ca91cx42_bridge
= image
->parent
;
602 bridge
= ca91cx42_bridge
->driver_priv
;
606 if ((i
== 0) || (i
== 4))
607 granularity
= 0x1000;
609 granularity
= 0x10000;
611 /* Verify input data */
612 if (vme_base
& (granularity
- 1)) {
613 dev_err(ca91cx42_bridge
->parent
, "Invalid VME Window "
618 if (size
& (granularity
- 1)) {
619 dev_err(ca91cx42_bridge
->parent
, "Invalid VME Window "
625 spin_lock(&image
->lock
);
628 * Let's allocate the resource here rather than further up the stack as
629 * it avoids pushing loads of bus dependent stuff up the stack
631 retval
= ca91cx42_alloc_resource(image
, size
);
633 spin_unlock(&image
->lock
);
634 dev_err(ca91cx42_bridge
->parent
, "Unable to allocate memory "
635 "for resource name\n");
640 pci_base
= (unsigned long long)image
->bus_resource
.start
;
643 * Bound address is a valid address for the window, adjust
644 * according to window granularity.
646 pci_bound
= pci_base
+ size
;
647 vme_offset
= vme_base
- pci_base
;
649 /* Disable while we are mucking around */
650 temp_ctl
= ioread32(bridge
->base
+ CA91CX42_LSI_CTL
[i
]);
651 temp_ctl
&= ~CA91CX42_LSI_CTL_EN
;
652 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_LSI_CTL
[i
]);
654 /* Setup cycle types */
655 temp_ctl
&= ~CA91CX42_LSI_CTL_VCT_M
;
657 temp_ctl
|= CA91CX42_LSI_CTL_VCT_BLT
;
658 if (cycle
& VME_MBLT
)
659 temp_ctl
|= CA91CX42_LSI_CTL_VCT_MBLT
;
661 /* Setup data width */
662 temp_ctl
&= ~CA91CX42_LSI_CTL_VDW_M
;
665 temp_ctl
|= CA91CX42_LSI_CTL_VDW_D8
;
668 temp_ctl
|= CA91CX42_LSI_CTL_VDW_D16
;
671 temp_ctl
|= CA91CX42_LSI_CTL_VDW_D32
;
674 temp_ctl
|= CA91CX42_LSI_CTL_VDW_D64
;
677 spin_unlock(&image
->lock
);
678 dev_err(ca91cx42_bridge
->parent
, "Invalid data width\n");
684 /* Setup address space */
685 temp_ctl
&= ~CA91CX42_LSI_CTL_VAS_M
;
688 temp_ctl
|= CA91CX42_LSI_CTL_VAS_A16
;
691 temp_ctl
|= CA91CX42_LSI_CTL_VAS_A24
;
694 temp_ctl
|= CA91CX42_LSI_CTL_VAS_A32
;
697 temp_ctl
|= CA91CX42_LSI_CTL_VAS_CRCSR
;
700 temp_ctl
|= CA91CX42_LSI_CTL_VAS_USER1
;
703 temp_ctl
|= CA91CX42_LSI_CTL_VAS_USER2
;
709 spin_unlock(&image
->lock
);
710 dev_err(ca91cx42_bridge
->parent
, "Invalid address space\n");
716 temp_ctl
&= ~(CA91CX42_LSI_CTL_PGM_M
| CA91CX42_LSI_CTL_SUPER_M
);
717 if (cycle
& VME_SUPER
)
718 temp_ctl
|= CA91CX42_LSI_CTL_SUPER_SUPR
;
719 if (cycle
& VME_PROG
)
720 temp_ctl
|= CA91CX42_LSI_CTL_PGM_PGM
;
723 iowrite32(pci_base
, bridge
->base
+ CA91CX42_LSI_BS
[i
]);
724 iowrite32(pci_bound
, bridge
->base
+ CA91CX42_LSI_BD
[i
]);
725 iowrite32(vme_offset
, bridge
->base
+ CA91CX42_LSI_TO
[i
]);
727 /* Write ctl reg without enable */
728 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_LSI_CTL
[i
]);
731 temp_ctl
|= CA91CX42_LSI_CTL_EN
;
733 iowrite32(temp_ctl
, bridge
->base
+ CA91CX42_LSI_CTL
[i
]);
735 spin_unlock(&image
->lock
);
740 ca91cx42_free_resource(image
);
746 static int __ca91cx42_master_get(struct vme_master_resource
*image
,
747 int *enabled
, unsigned long long *vme_base
, unsigned long long *size
,
748 u32
*aspace
, u32
*cycle
, u32
*dwidth
)
751 unsigned long long pci_base
, pci_bound
, vme_offset
;
752 struct ca91cx42_driver
*bridge
;
754 bridge
= image
->parent
->driver_priv
;
758 ctl
= ioread32(bridge
->base
+ CA91CX42_LSI_CTL
[i
]);
760 pci_base
= ioread32(bridge
->base
+ CA91CX42_LSI_BS
[i
]);
761 vme_offset
= ioread32(bridge
->base
+ CA91CX42_LSI_TO
[i
]);
762 pci_bound
= ioread32(bridge
->base
+ CA91CX42_LSI_BD
[i
]);
764 *vme_base
= pci_base
+ vme_offset
;
765 *size
= (unsigned long long)(pci_bound
- pci_base
);
772 if (ctl
& CA91CX42_LSI_CTL_EN
)
775 /* Setup address space */
776 switch (ctl
& CA91CX42_LSI_CTL_VAS_M
) {
777 case CA91CX42_LSI_CTL_VAS_A16
:
780 case CA91CX42_LSI_CTL_VAS_A24
:
783 case CA91CX42_LSI_CTL_VAS_A32
:
786 case CA91CX42_LSI_CTL_VAS_CRCSR
:
789 case CA91CX42_LSI_CTL_VAS_USER1
:
792 case CA91CX42_LSI_CTL_VAS_USER2
:
797 /* XXX Not sure howto check for MBLT */
798 /* Setup cycle types */
799 if (ctl
& CA91CX42_LSI_CTL_VCT_BLT
)
804 if (ctl
& CA91CX42_LSI_CTL_SUPER_SUPR
)
809 if (ctl
& CA91CX42_LSI_CTL_PGM_PGM
)
814 /* Setup data width */
815 switch (ctl
& CA91CX42_LSI_CTL_VDW_M
) {
816 case CA91CX42_LSI_CTL_VDW_D8
:
819 case CA91CX42_LSI_CTL_VDW_D16
:
822 case CA91CX42_LSI_CTL_VDW_D32
:
825 case CA91CX42_LSI_CTL_VDW_D64
:
833 static int ca91cx42_master_get(struct vme_master_resource
*image
, int *enabled
,
834 unsigned long long *vme_base
, unsigned long long *size
, u32
*aspace
,
835 u32
*cycle
, u32
*dwidth
)
839 spin_lock(&image
->lock
);
841 retval
= __ca91cx42_master_get(image
, enabled
, vme_base
, size
, aspace
,
844 spin_unlock(&image
->lock
);
849 static ssize_t
ca91cx42_master_read(struct vme_master_resource
*image
,
850 void *buf
, size_t count
, loff_t offset
)
853 void __iomem
*addr
= image
->kern_base
+ offset
;
854 unsigned int done
= 0;
855 unsigned int count32
;
860 spin_lock(&image
->lock
);
862 /* The following code handles VME address alignment. We cannot use
863 * memcpy_xxx here because it may cut data transfers in to 8-bit
864 * cycles when D16 or D32 cycles are required on the VME bus.
865 * On the other hand, the bridge itself assures that the maximum data
866 * cycle configured for the transfer is used and splits it
867 * automatically for non-aligned addresses, so we don't want the
868 * overhead of needlessly forcing small transfers for the entire cycle.
870 if ((uintptr_t)addr
& 0x1) {
871 *(u8
*)buf
= ioread8(addr
);
876 if ((uintptr_t)(addr
+ done
) & 0x2) {
877 if ((count
- done
) < 2) {
878 *(u8
*)(buf
+ done
) = ioread8(addr
+ done
);
882 *(u16
*)(buf
+ done
) = ioread16(addr
+ done
);
887 count32
= (count
- done
) & ~0x3;
888 while (done
< count32
) {
889 *(u32
*)(buf
+ done
) = ioread32(addr
+ done
);
893 if ((count
- done
) & 0x2) {
894 *(u16
*)(buf
+ done
) = ioread16(addr
+ done
);
897 if ((count
- done
) & 0x1) {
898 *(u8
*)(buf
+ done
) = ioread8(addr
+ done
);
903 spin_unlock(&image
->lock
);
908 static ssize_t
ca91cx42_master_write(struct vme_master_resource
*image
,
909 void *buf
, size_t count
, loff_t offset
)
912 void __iomem
*addr
= image
->kern_base
+ offset
;
913 unsigned int done
= 0;
914 unsigned int count32
;
919 spin_lock(&image
->lock
);
921 /* Here we apply for the same strategy we do in master_read
922 * function in order to assure the correct cycles.
924 if ((uintptr_t)addr
& 0x1) {
925 iowrite8(*(u8
*)buf
, addr
);
930 if ((uintptr_t)(addr
+ done
) & 0x2) {
931 if ((count
- done
) < 2) {
932 iowrite8(*(u8
*)(buf
+ done
), addr
+ done
);
936 iowrite16(*(u16
*)(buf
+ done
), addr
+ done
);
941 count32
= (count
- done
) & ~0x3;
942 while (done
< count32
) {
943 iowrite32(*(u32
*)(buf
+ done
), addr
+ done
);
947 if ((count
- done
) & 0x2) {
948 iowrite16(*(u16
*)(buf
+ done
), addr
+ done
);
951 if ((count
- done
) & 0x1) {
952 iowrite8(*(u8
*)(buf
+ done
), addr
+ done
);
958 spin_unlock(&image
->lock
);
963 static unsigned int ca91cx42_master_rmw(struct vme_master_resource
*image
,
964 unsigned int mask
, unsigned int compare
, unsigned int swap
,
969 struct ca91cx42_driver
*bridge
;
972 bridge
= image
->parent
->driver_priv
;
973 dev
= image
->parent
->parent
;
975 /* Find the PCI address that maps to the desired VME address */
977 /* Locking as we can only do one of these at a time */
978 mutex_lock(&bridge
->vme_rmw
);
981 spin_lock(&image
->lock
);
983 pci_addr
= (uintptr_t)image
->kern_base
+ offset
;
985 /* Address must be 4-byte aligned */
986 if (pci_addr
& 0x3) {
987 dev_err(dev
, "RMW Address not 4-byte aligned\n");
992 /* Ensure RMW Disabled whilst configuring */
993 iowrite32(0, bridge
->base
+ SCYC_CTL
);
995 /* Configure registers */
996 iowrite32(mask
, bridge
->base
+ SCYC_EN
);
997 iowrite32(compare
, bridge
->base
+ SCYC_CMP
);
998 iowrite32(swap
, bridge
->base
+ SCYC_SWP
);
999 iowrite32(pci_addr
, bridge
->base
+ SCYC_ADDR
);
1002 iowrite32(CA91CX42_SCYC_CTL_CYC_RMW
, bridge
->base
+ SCYC_CTL
);
1004 /* Kick process off with a read to the required address. */
1005 result
= ioread32(image
->kern_base
+ offset
);
1008 iowrite32(0, bridge
->base
+ SCYC_CTL
);
1011 spin_unlock(&image
->lock
);
1013 mutex_unlock(&bridge
->vme_rmw
);
1018 static int ca91cx42_dma_list_add(struct vme_dma_list
*list
,
1019 struct vme_dma_attr
*src
, struct vme_dma_attr
*dest
, size_t count
)
1021 struct ca91cx42_dma_entry
*entry
, *prev
;
1022 struct vme_dma_pci
*pci_attr
;
1023 struct vme_dma_vme
*vme_attr
;
1024 dma_addr_t desc_ptr
;
1028 dev
= list
->parent
->parent
->parent
;
1030 /* XXX descriptor must be aligned on 64-bit boundaries */
1031 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1037 /* Test descriptor alignment */
1038 if ((unsigned long)&entry
->descriptor
& CA91CX42_DCPP_M
) {
1039 dev_err(dev
, "Descriptor not aligned to 16 byte boundary as "
1040 "required: %p\n", &entry
->descriptor
);
1045 memset(&entry
->descriptor
, 0, sizeof(entry
->descriptor
));
1047 if (dest
->type
== VME_DMA_VME
) {
1048 entry
->descriptor
.dctl
|= CA91CX42_DCTL_L2V
;
1049 vme_attr
= dest
->private;
1050 pci_attr
= src
->private;
1052 vme_attr
= src
->private;
1053 pci_attr
= dest
->private;
1056 /* Check we can do fulfill required attributes */
1057 if ((vme_attr
->aspace
& ~(VME_A16
| VME_A24
| VME_A32
| VME_USER1
|
1060 dev_err(dev
, "Unsupported cycle type\n");
1065 if ((vme_attr
->cycle
& ~(VME_SCT
| VME_BLT
| VME_SUPER
| VME_USER
|
1066 VME_PROG
| VME_DATA
)) != 0) {
1068 dev_err(dev
, "Unsupported cycle type\n");
1073 /* Check to see if we can fulfill source and destination */
1074 if (!(((src
->type
== VME_DMA_PCI
) && (dest
->type
== VME_DMA_VME
)) ||
1075 ((src
->type
== VME_DMA_VME
) && (dest
->type
== VME_DMA_PCI
)))) {
1077 dev_err(dev
, "Cannot perform transfer with this "
1078 "source-destination combination\n");
1083 /* Setup cycle types */
1084 if (vme_attr
->cycle
& VME_BLT
)
1085 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VCT_BLT
;
1087 /* Setup data width */
1088 switch (vme_attr
->dwidth
) {
1090 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VDW_D8
;
1093 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VDW_D16
;
1096 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VDW_D32
;
1099 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VDW_D64
;
1102 dev_err(dev
, "Invalid data width\n");
1106 /* Setup address space */
1107 switch (vme_attr
->aspace
) {
1109 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VAS_A16
;
1112 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VAS_A24
;
1115 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VAS_A32
;
1118 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VAS_USER1
;
1121 entry
->descriptor
.dctl
|= CA91CX42_DCTL_VAS_USER2
;
1124 dev_err(dev
, "Invalid address space\n");
1129 if (vme_attr
->cycle
& VME_SUPER
)
1130 entry
->descriptor
.dctl
|= CA91CX42_DCTL_SUPER_SUPR
;
1131 if (vme_attr
->cycle
& VME_PROG
)
1132 entry
->descriptor
.dctl
|= CA91CX42_DCTL_PGM_PGM
;
1134 entry
->descriptor
.dtbc
= count
;
1135 entry
->descriptor
.dla
= pci_attr
->address
;
1136 entry
->descriptor
.dva
= vme_attr
->address
;
1137 entry
->descriptor
.dcpp
= CA91CX42_DCPP_NULL
;
1140 list_add_tail(&entry
->list
, &list
->entries
);
1142 /* Fill out previous descriptors "Next Address" */
1143 if (entry
->list
.prev
!= &list
->entries
) {
1144 prev
= list_entry(entry
->list
.prev
, struct ca91cx42_dma_entry
,
1146 /* We need the bus address for the pointer */
1147 desc_ptr
= virt_to_bus(&entry
->descriptor
);
1148 prev
->descriptor
.dcpp
= desc_ptr
& ~CA91CX42_DCPP_M
;
1162 static int ca91cx42_dma_busy(struct vme_bridge
*ca91cx42_bridge
)
1165 struct ca91cx42_driver
*bridge
;
1167 bridge
= ca91cx42_bridge
->driver_priv
;
1169 tmp
= ioread32(bridge
->base
+ DGCS
);
1171 if (tmp
& CA91CX42_DGCS_ACT
)
1177 static int ca91cx42_dma_list_exec(struct vme_dma_list
*list
)
1179 struct vme_dma_resource
*ctrlr
;
1180 struct ca91cx42_dma_entry
*entry
;
1182 dma_addr_t bus_addr
;
1185 struct ca91cx42_driver
*bridge
;
1187 ctrlr
= list
->parent
;
1189 bridge
= ctrlr
->parent
->driver_priv
;
1190 dev
= ctrlr
->parent
->parent
;
1192 mutex_lock(&ctrlr
->mtx
);
1194 if (!(list_empty(&ctrlr
->running
))) {
1196 * XXX We have an active DMA transfer and currently haven't
1197 * sorted out the mechanism for "pending" DMA transfers.
1200 /* Need to add to pending here */
1201 mutex_unlock(&ctrlr
->mtx
);
1204 list_add(&list
->list
, &ctrlr
->running
);
1207 /* Get first bus address and write into registers */
1208 entry
= list_first_entry(&list
->entries
, struct ca91cx42_dma_entry
,
1211 bus_addr
= virt_to_bus(&entry
->descriptor
);
1213 mutex_unlock(&ctrlr
->mtx
);
1215 iowrite32(0, bridge
->base
+ DTBC
);
1216 iowrite32(bus_addr
& ~CA91CX42_DCPP_M
, bridge
->base
+ DCPP
);
1218 /* Start the operation */
1219 val
= ioread32(bridge
->base
+ DGCS
);
1221 /* XXX Could set VMEbus On and Off Counters here */
1222 val
&= (CA91CX42_DGCS_VON_M
| CA91CX42_DGCS_VOFF_M
);
1224 val
|= (CA91CX42_DGCS_CHAIN
| CA91CX42_DGCS_STOP
| CA91CX42_DGCS_HALT
|
1225 CA91CX42_DGCS_DONE
| CA91CX42_DGCS_LERR
| CA91CX42_DGCS_VERR
|
1226 CA91CX42_DGCS_PERR
);
1228 iowrite32(val
, bridge
->base
+ DGCS
);
1230 val
|= CA91CX42_DGCS_GO
;
1232 iowrite32(val
, bridge
->base
+ DGCS
);
1234 retval
= wait_event_interruptible(bridge
->dma_queue
,
1235 ca91cx42_dma_busy(ctrlr
->parent
));
1238 val
= ioread32(bridge
->base
+ DGCS
);
1239 iowrite32(val
| CA91CX42_DGCS_STOP_REQ
, bridge
->base
+ DGCS
);
1240 /* Wait for the operation to abort */
1241 wait_event(bridge
->dma_queue
,
1242 ca91cx42_dma_busy(ctrlr
->parent
));
1248 * Read status register, this register is valid until we kick off a
1251 val
= ioread32(bridge
->base
+ DGCS
);
1253 if (val
& (CA91CX42_DGCS_LERR
| CA91CX42_DGCS_VERR
|
1254 CA91CX42_DGCS_PERR
)) {
1256 dev_err(dev
, "ca91c042: DMA Error. DGCS=%08X\n", val
);
1257 val
= ioread32(bridge
->base
+ DCTL
);
1262 /* Remove list from running list */
1263 mutex_lock(&ctrlr
->mtx
);
1264 list_del(&list
->list
);
1265 mutex_unlock(&ctrlr
->mtx
);
1271 static int ca91cx42_dma_list_empty(struct vme_dma_list
*list
)
1273 struct list_head
*pos
, *temp
;
1274 struct ca91cx42_dma_entry
*entry
;
1276 /* detach and free each entry */
1277 list_for_each_safe(pos
, temp
, &list
->entries
) {
1279 entry
= list_entry(pos
, struct ca91cx42_dma_entry
, list
);
1287 * All 4 location monitors reside at the same base - this is therefore a
1288 * system wide configuration.
1290 * This does not enable the LM monitor - that should be done when the first
1291 * callback is attached and disabled when the last callback is removed.
1293 static int ca91cx42_lm_set(struct vme_lm_resource
*lm
,
1294 unsigned long long lm_base
, u32 aspace
, u32 cycle
)
1296 u32 temp_base
, lm_ctl
= 0;
1298 struct ca91cx42_driver
*bridge
;
1301 bridge
= lm
->parent
->driver_priv
;
1302 dev
= lm
->parent
->parent
;
1304 /* Check the alignment of the location monitor */
1305 temp_base
= (u32
)lm_base
;
1306 if (temp_base
& 0xffff) {
1307 dev_err(dev
, "Location monitor must be aligned to 64KB "
1312 mutex_lock(&lm
->mtx
);
1314 /* If we already have a callback attached, we can't move it! */
1315 for (i
= 0; i
< lm
->monitors
; i
++) {
1316 if (bridge
->lm_callback
[i
]) {
1317 mutex_unlock(&lm
->mtx
);
1318 dev_err(dev
, "Location monitor callback attached, "
1326 lm_ctl
|= CA91CX42_LM_CTL_AS_A16
;
1329 lm_ctl
|= CA91CX42_LM_CTL_AS_A24
;
1332 lm_ctl
|= CA91CX42_LM_CTL_AS_A32
;
1335 mutex_unlock(&lm
->mtx
);
1336 dev_err(dev
, "Invalid address space\n");
1341 if (cycle
& VME_SUPER
)
1342 lm_ctl
|= CA91CX42_LM_CTL_SUPR
;
1343 if (cycle
& VME_USER
)
1344 lm_ctl
|= CA91CX42_LM_CTL_NPRIV
;
1345 if (cycle
& VME_PROG
)
1346 lm_ctl
|= CA91CX42_LM_CTL_PGM
;
1347 if (cycle
& VME_DATA
)
1348 lm_ctl
|= CA91CX42_LM_CTL_DATA
;
1350 iowrite32(lm_base
, bridge
->base
+ LM_BS
);
1351 iowrite32(lm_ctl
, bridge
->base
+ LM_CTL
);
1353 mutex_unlock(&lm
->mtx
);
1358 /* Get configuration of the callback monitor and return whether it is enabled
1361 static int ca91cx42_lm_get(struct vme_lm_resource
*lm
,
1362 unsigned long long *lm_base
, u32
*aspace
, u32
*cycle
)
1364 u32 lm_ctl
, enabled
= 0;
1365 struct ca91cx42_driver
*bridge
;
1367 bridge
= lm
->parent
->driver_priv
;
1369 mutex_lock(&lm
->mtx
);
1371 *lm_base
= (unsigned long long)ioread32(bridge
->base
+ LM_BS
);
1372 lm_ctl
= ioread32(bridge
->base
+ LM_CTL
);
1374 if (lm_ctl
& CA91CX42_LM_CTL_EN
)
1377 if ((lm_ctl
& CA91CX42_LM_CTL_AS_M
) == CA91CX42_LM_CTL_AS_A16
)
1379 if ((lm_ctl
& CA91CX42_LM_CTL_AS_M
) == CA91CX42_LM_CTL_AS_A24
)
1381 if ((lm_ctl
& CA91CX42_LM_CTL_AS_M
) == CA91CX42_LM_CTL_AS_A32
)
1385 if (lm_ctl
& CA91CX42_LM_CTL_SUPR
)
1386 *cycle
|= VME_SUPER
;
1387 if (lm_ctl
& CA91CX42_LM_CTL_NPRIV
)
1389 if (lm_ctl
& CA91CX42_LM_CTL_PGM
)
1391 if (lm_ctl
& CA91CX42_LM_CTL_DATA
)
1394 mutex_unlock(&lm
->mtx
);
1400 * Attach a callback to a specific location monitor.
1402 * Callback will be passed the monitor triggered.
1404 static int ca91cx42_lm_attach(struct vme_lm_resource
*lm
, int monitor
,
1405 void (*callback
)(void *), void *data
)
1408 struct ca91cx42_driver
*bridge
;
1411 bridge
= lm
->parent
->driver_priv
;
1412 dev
= lm
->parent
->parent
;
1414 mutex_lock(&lm
->mtx
);
1416 /* Ensure that the location monitor is configured - need PGM or DATA */
1417 lm_ctl
= ioread32(bridge
->base
+ LM_CTL
);
1418 if ((lm_ctl
& (CA91CX42_LM_CTL_PGM
| CA91CX42_LM_CTL_DATA
)) == 0) {
1419 mutex_unlock(&lm
->mtx
);
1420 dev_err(dev
, "Location monitor not properly configured\n");
1424 /* Check that a callback isn't already attached */
1425 if (bridge
->lm_callback
[monitor
]) {
1426 mutex_unlock(&lm
->mtx
);
1427 dev_err(dev
, "Existing callback attached\n");
1431 /* Attach callback */
1432 bridge
->lm_callback
[monitor
] = callback
;
1433 bridge
->lm_data
[monitor
] = data
;
1435 /* Enable Location Monitor interrupt */
1436 tmp
= ioread32(bridge
->base
+ LINT_EN
);
1437 tmp
|= CA91CX42_LINT_LM
[monitor
];
1438 iowrite32(tmp
, bridge
->base
+ LINT_EN
);
1440 /* Ensure that global Location Monitor Enable set */
1441 if ((lm_ctl
& CA91CX42_LM_CTL_EN
) == 0) {
1442 lm_ctl
|= CA91CX42_LM_CTL_EN
;
1443 iowrite32(lm_ctl
, bridge
->base
+ LM_CTL
);
1446 mutex_unlock(&lm
->mtx
);
1452 * Detach a callback function forn a specific location monitor.
1454 static int ca91cx42_lm_detach(struct vme_lm_resource
*lm
, int monitor
)
1457 struct ca91cx42_driver
*bridge
;
1459 bridge
= lm
->parent
->driver_priv
;
1461 mutex_lock(&lm
->mtx
);
1463 /* Disable Location Monitor and ensure previous interrupts are clear */
1464 tmp
= ioread32(bridge
->base
+ LINT_EN
);
1465 tmp
&= ~CA91CX42_LINT_LM
[monitor
];
1466 iowrite32(tmp
, bridge
->base
+ LINT_EN
);
1468 iowrite32(CA91CX42_LINT_LM
[monitor
],
1469 bridge
->base
+ LINT_STAT
);
1471 /* Detach callback */
1472 bridge
->lm_callback
[monitor
] = NULL
;
1473 bridge
->lm_data
[monitor
] = NULL
;
1475 /* If all location monitors disabled, disable global Location Monitor */
1476 if ((tmp
& (CA91CX42_LINT_LM0
| CA91CX42_LINT_LM1
| CA91CX42_LINT_LM2
|
1477 CA91CX42_LINT_LM3
)) == 0) {
1478 tmp
= ioread32(bridge
->base
+ LM_CTL
);
1479 tmp
&= ~CA91CX42_LM_CTL_EN
;
1480 iowrite32(tmp
, bridge
->base
+ LM_CTL
);
1483 mutex_unlock(&lm
->mtx
);
1488 static int ca91cx42_slot_get(struct vme_bridge
*ca91cx42_bridge
)
1491 struct ca91cx42_driver
*bridge
;
1493 bridge
= ca91cx42_bridge
->driver_priv
;
1496 slot
= ioread32(bridge
->base
+ VCSR_BS
);
1497 slot
= ((slot
& CA91CX42_VCSR_BS_SLOT_M
) >> 27);
1505 static void *ca91cx42_alloc_consistent(struct device
*parent
, size_t size
,
1508 struct pci_dev
*pdev
;
1510 /* Find pci_dev container of dev */
1511 pdev
= to_pci_dev(parent
);
1513 return dma_alloc_coherent(&pdev
->dev
, size
, dma
, GFP_KERNEL
);
1516 static void ca91cx42_free_consistent(struct device
*parent
, size_t size
,
1517 void *vaddr
, dma_addr_t dma
)
1519 struct pci_dev
*pdev
;
1521 /* Find pci_dev container of dev */
1522 pdev
= to_pci_dev(parent
);
1524 dma_free_coherent(&pdev
->dev
, size
, vaddr
, dma
);
1528 * Configure CR/CSR space
1530 * Access to the CR/CSR can be configured at power-up. The location of the
1531 * CR/CSR registers in the CR/CSR address space is determined by the boards
1532 * Auto-ID or Geographic address. This function ensures that the window is
1533 * enabled at an offset consistent with the boards geopgraphic address.
1535 static int ca91cx42_crcsr_init(struct vme_bridge
*ca91cx42_bridge
,
1536 struct pci_dev
*pdev
)
1538 unsigned int crcsr_addr
;
1540 struct ca91cx42_driver
*bridge
;
1542 bridge
= ca91cx42_bridge
->driver_priv
;
1544 slot
= ca91cx42_slot_get(ca91cx42_bridge
);
1546 /* Write CSR Base Address if slot ID is supplied as a module param */
1548 iowrite32(geoid
<< 27, bridge
->base
+ VCSR_BS
);
1550 dev_info(&pdev
->dev
, "CR/CSR Offset: %d\n", slot
);
1552 dev_err(&pdev
->dev
, "Slot number is unset, not configuring "
1557 /* Allocate mem for CR/CSR image */
1558 bridge
->crcsr_kernel
= dma_alloc_coherent(&pdev
->dev
,
1560 &bridge
->crcsr_bus
, GFP_KERNEL
);
1561 if (!bridge
->crcsr_kernel
) {
1562 dev_err(&pdev
->dev
, "Failed to allocate memory for CR/CSR "
1567 crcsr_addr
= slot
* (512 * 1024);
1568 iowrite32(bridge
->crcsr_bus
- crcsr_addr
, bridge
->base
+ VCSR_TO
);
1570 tmp
= ioread32(bridge
->base
+ VCSR_CTL
);
1571 tmp
|= CA91CX42_VCSR_CTL_EN
;
1572 iowrite32(tmp
, bridge
->base
+ VCSR_CTL
);
1577 static void ca91cx42_crcsr_exit(struct vme_bridge
*ca91cx42_bridge
,
1578 struct pci_dev
*pdev
)
1581 struct ca91cx42_driver
*bridge
;
1583 bridge
= ca91cx42_bridge
->driver_priv
;
1585 /* Turn off CR/CSR space */
1586 tmp
= ioread32(bridge
->base
+ VCSR_CTL
);
1587 tmp
&= ~CA91CX42_VCSR_CTL_EN
;
1588 iowrite32(tmp
, bridge
->base
+ VCSR_CTL
);
1591 iowrite32(0, bridge
->base
+ VCSR_TO
);
1593 dma_free_coherent(&pdev
->dev
, VME_CRCSR_BUF_SIZE
,
1594 bridge
->crcsr_kernel
, bridge
->crcsr_bus
);
1597 static int ca91cx42_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
1601 struct list_head
*pos
= NULL
, *n
;
1602 struct vme_bridge
*ca91cx42_bridge
;
1603 struct ca91cx42_driver
*ca91cx42_device
;
1604 struct vme_master_resource
*master_image
;
1605 struct vme_slave_resource
*slave_image
;
1606 struct vme_dma_resource
*dma_ctrlr
;
1607 struct vme_lm_resource
*lm
;
1609 /* We want to support more than one of each bridge so we need to
1610 * dynamically allocate the bridge structure
1612 ca91cx42_bridge
= kzalloc(sizeof(*ca91cx42_bridge
), GFP_KERNEL
);
1613 if (!ca91cx42_bridge
) {
1617 vme_init_bridge(ca91cx42_bridge
);
1619 ca91cx42_device
= kzalloc(sizeof(*ca91cx42_device
), GFP_KERNEL
);
1620 if (!ca91cx42_device
) {
1625 ca91cx42_bridge
->driver_priv
= ca91cx42_device
;
1627 /* Enable the device */
1628 retval
= pci_enable_device(pdev
);
1630 dev_err(&pdev
->dev
, "Unable to enable device\n");
1635 retval
= pci_request_regions(pdev
, driver_name
);
1637 dev_err(&pdev
->dev
, "Unable to reserve resources\n");
1641 /* map registers in BAR 0 */
1642 ca91cx42_device
->base
= ioremap(pci_resource_start(pdev
, 0),
1644 if (!ca91cx42_device
->base
) {
1645 dev_err(&pdev
->dev
, "Unable to remap CRG region\n");
1650 /* Check to see if the mapping worked out */
1651 data
= ioread32(ca91cx42_device
->base
+ CA91CX42_PCI_ID
) & 0x0000FFFF;
1652 if (data
!= PCI_VENDOR_ID_TUNDRA
) {
1653 dev_err(&pdev
->dev
, "PCI_ID check failed\n");
1658 /* Initialize wait queues & mutual exclusion flags */
1659 init_waitqueue_head(&ca91cx42_device
->dma_queue
);
1660 init_waitqueue_head(&ca91cx42_device
->iack_queue
);
1661 mutex_init(&ca91cx42_device
->vme_int
);
1662 mutex_init(&ca91cx42_device
->vme_rmw
);
1664 ca91cx42_bridge
->parent
= &pdev
->dev
;
1665 strcpy(ca91cx42_bridge
->name
, driver_name
);
1668 retval
= ca91cx42_irq_init(ca91cx42_bridge
);
1670 dev_err(&pdev
->dev
, "Chip Initialization failed.\n");
1674 /* Add master windows to list */
1675 for (i
= 0; i
< CA91C142_MAX_MASTER
; i
++) {
1676 master_image
= kmalloc(sizeof(*master_image
), GFP_KERNEL
);
1677 if (!master_image
) {
1681 master_image
->parent
= ca91cx42_bridge
;
1682 spin_lock_init(&master_image
->lock
);
1683 master_image
->locked
= 0;
1684 master_image
->number
= i
;
1685 master_image
->address_attr
= VME_A16
| VME_A24
| VME_A32
|
1686 VME_CRCSR
| VME_USER1
| VME_USER2
;
1687 master_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
1688 VME_SUPER
| VME_USER
| VME_PROG
| VME_DATA
;
1689 master_image
->width_attr
= VME_D8
| VME_D16
| VME_D32
| VME_D64
;
1690 memset(&master_image
->bus_resource
, 0,
1691 sizeof(master_image
->bus_resource
));
1692 master_image
->kern_base
= NULL
;
1693 list_add_tail(&master_image
->list
,
1694 &ca91cx42_bridge
->master_resources
);
1697 /* Add slave windows to list */
1698 for (i
= 0; i
< CA91C142_MAX_SLAVE
; i
++) {
1699 slave_image
= kmalloc(sizeof(*slave_image
), GFP_KERNEL
);
1704 slave_image
->parent
= ca91cx42_bridge
;
1705 mutex_init(&slave_image
->mtx
);
1706 slave_image
->locked
= 0;
1707 slave_image
->number
= i
;
1708 slave_image
->address_attr
= VME_A24
| VME_A32
| VME_USER1
|
1711 /* Only windows 0 and 4 support A16 */
1712 if (i
== 0 || i
== 4)
1713 slave_image
->address_attr
|= VME_A16
;
1715 slave_image
->cycle_attr
= VME_SCT
| VME_BLT
| VME_MBLT
|
1716 VME_SUPER
| VME_USER
| VME_PROG
| VME_DATA
;
1717 list_add_tail(&slave_image
->list
,
1718 &ca91cx42_bridge
->slave_resources
);
1721 /* Add dma engines to list */
1722 for (i
= 0; i
< CA91C142_MAX_DMA
; i
++) {
1723 dma_ctrlr
= kmalloc(sizeof(*dma_ctrlr
), GFP_KERNEL
);
1728 dma_ctrlr
->parent
= ca91cx42_bridge
;
1729 mutex_init(&dma_ctrlr
->mtx
);
1730 dma_ctrlr
->locked
= 0;
1731 dma_ctrlr
->number
= i
;
1732 dma_ctrlr
->route_attr
= VME_DMA_VME_TO_MEM
|
1734 INIT_LIST_HEAD(&dma_ctrlr
->pending
);
1735 INIT_LIST_HEAD(&dma_ctrlr
->running
);
1736 list_add_tail(&dma_ctrlr
->list
,
1737 &ca91cx42_bridge
->dma_resources
);
1740 /* Add location monitor to list */
1741 lm
= kmalloc(sizeof(*lm
), GFP_KERNEL
);
1746 lm
->parent
= ca91cx42_bridge
;
1747 mutex_init(&lm
->mtx
);
1751 list_add_tail(&lm
->list
, &ca91cx42_bridge
->lm_resources
);
1753 ca91cx42_bridge
->slave_get
= ca91cx42_slave_get
;
1754 ca91cx42_bridge
->slave_set
= ca91cx42_slave_set
;
1755 ca91cx42_bridge
->master_get
= ca91cx42_master_get
;
1756 ca91cx42_bridge
->master_set
= ca91cx42_master_set
;
1757 ca91cx42_bridge
->master_read
= ca91cx42_master_read
;
1758 ca91cx42_bridge
->master_write
= ca91cx42_master_write
;
1759 ca91cx42_bridge
->master_rmw
= ca91cx42_master_rmw
;
1760 ca91cx42_bridge
->dma_list_add
= ca91cx42_dma_list_add
;
1761 ca91cx42_bridge
->dma_list_exec
= ca91cx42_dma_list_exec
;
1762 ca91cx42_bridge
->dma_list_empty
= ca91cx42_dma_list_empty
;
1763 ca91cx42_bridge
->irq_set
= ca91cx42_irq_set
;
1764 ca91cx42_bridge
->irq_generate
= ca91cx42_irq_generate
;
1765 ca91cx42_bridge
->lm_set
= ca91cx42_lm_set
;
1766 ca91cx42_bridge
->lm_get
= ca91cx42_lm_get
;
1767 ca91cx42_bridge
->lm_attach
= ca91cx42_lm_attach
;
1768 ca91cx42_bridge
->lm_detach
= ca91cx42_lm_detach
;
1769 ca91cx42_bridge
->slot_get
= ca91cx42_slot_get
;
1770 ca91cx42_bridge
->alloc_consistent
= ca91cx42_alloc_consistent
;
1771 ca91cx42_bridge
->free_consistent
= ca91cx42_free_consistent
;
1773 data
= ioread32(ca91cx42_device
->base
+ MISC_CTL
);
1774 dev_info(&pdev
->dev
, "Board is%s the VME system controller\n",
1775 (data
& CA91CX42_MISC_CTL_SYSCON
) ? "" : " not");
1776 dev_info(&pdev
->dev
, "Slot ID is %d\n",
1777 ca91cx42_slot_get(ca91cx42_bridge
));
1779 if (ca91cx42_crcsr_init(ca91cx42_bridge
, pdev
))
1780 dev_err(&pdev
->dev
, "CR/CSR configuration failed.\n");
1782 /* Need to save ca91cx42_bridge pointer locally in link list for use in
1785 retval
= vme_register_bridge(ca91cx42_bridge
);
1787 dev_err(&pdev
->dev
, "Chip Registration failed.\n");
1791 pci_set_drvdata(pdev
, ca91cx42_bridge
);
1796 ca91cx42_crcsr_exit(ca91cx42_bridge
, pdev
);
1798 /* resources are stored in link list */
1799 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->lm_resources
) {
1800 lm
= list_entry(pos
, struct vme_lm_resource
, list
);
1805 /* resources are stored in link list */
1806 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->dma_resources
) {
1807 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
1812 /* resources are stored in link list */
1813 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->slave_resources
) {
1814 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
1819 /* resources are stored in link list */
1820 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->master_resources
) {
1821 master_image
= list_entry(pos
, struct vme_master_resource
,
1824 kfree(master_image
);
1827 ca91cx42_irq_exit(ca91cx42_device
, pdev
);
1830 iounmap(ca91cx42_device
->base
);
1832 pci_release_regions(pdev
);
1834 pci_disable_device(pdev
);
1836 kfree(ca91cx42_device
);
1838 kfree(ca91cx42_bridge
);
1844 static void ca91cx42_remove(struct pci_dev
*pdev
)
1846 struct list_head
*pos
= NULL
, *n
;
1847 struct vme_master_resource
*master_image
;
1848 struct vme_slave_resource
*slave_image
;
1849 struct vme_dma_resource
*dma_ctrlr
;
1850 struct vme_lm_resource
*lm
;
1851 struct ca91cx42_driver
*bridge
;
1852 struct vme_bridge
*ca91cx42_bridge
= pci_get_drvdata(pdev
);
1854 bridge
= ca91cx42_bridge
->driver_priv
;
1858 iowrite32(0, bridge
->base
+ LINT_EN
);
1860 /* Turn off the windows */
1861 iowrite32(0x00800000, bridge
->base
+ LSI0_CTL
);
1862 iowrite32(0x00800000, bridge
->base
+ LSI1_CTL
);
1863 iowrite32(0x00800000, bridge
->base
+ LSI2_CTL
);
1864 iowrite32(0x00800000, bridge
->base
+ LSI3_CTL
);
1865 iowrite32(0x00800000, bridge
->base
+ LSI4_CTL
);
1866 iowrite32(0x00800000, bridge
->base
+ LSI5_CTL
);
1867 iowrite32(0x00800000, bridge
->base
+ LSI6_CTL
);
1868 iowrite32(0x00800000, bridge
->base
+ LSI7_CTL
);
1869 iowrite32(0x00F00000, bridge
->base
+ VSI0_CTL
);
1870 iowrite32(0x00F00000, bridge
->base
+ VSI1_CTL
);
1871 iowrite32(0x00F00000, bridge
->base
+ VSI2_CTL
);
1872 iowrite32(0x00F00000, bridge
->base
+ VSI3_CTL
);
1873 iowrite32(0x00F00000, bridge
->base
+ VSI4_CTL
);
1874 iowrite32(0x00F00000, bridge
->base
+ VSI5_CTL
);
1875 iowrite32(0x00F00000, bridge
->base
+ VSI6_CTL
);
1876 iowrite32(0x00F00000, bridge
->base
+ VSI7_CTL
);
1878 vme_unregister_bridge(ca91cx42_bridge
);
1880 ca91cx42_crcsr_exit(ca91cx42_bridge
, pdev
);
1882 /* resources are stored in link list */
1883 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->lm_resources
) {
1884 lm
= list_entry(pos
, struct vme_lm_resource
, list
);
1889 /* resources are stored in link list */
1890 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->dma_resources
) {
1891 dma_ctrlr
= list_entry(pos
, struct vme_dma_resource
, list
);
1896 /* resources are stored in link list */
1897 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->slave_resources
) {
1898 slave_image
= list_entry(pos
, struct vme_slave_resource
, list
);
1903 /* resources are stored in link list */
1904 list_for_each_safe(pos
, n
, &ca91cx42_bridge
->master_resources
) {
1905 master_image
= list_entry(pos
, struct vme_master_resource
,
1908 kfree(master_image
);
1911 ca91cx42_irq_exit(bridge
, pdev
);
1913 iounmap(bridge
->base
);
1915 pci_release_regions(pdev
);
1917 pci_disable_device(pdev
);
1919 kfree(ca91cx42_bridge
);
1922 module_pci_driver(ca91cx42_driver
);
1924 MODULE_PARM_DESC(geoid
, "Override geographical addressing");
1925 module_param(geoid
, int, 0);
1927 MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1928 MODULE_LICENSE("GPL");