/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);

/* Module parameters */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
        { },
};

MODULE_DEVICE_TABLE(pci, ca91cx42_ids);

static struct pci_driver ca91cx42_driver = {
        .name = driver_name,
        .id_table = ca91cx42_ids,
        .probe = ca91cx42_probe,
        .remove = ca91cx42_remove,
};

static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
        wake_up(&bridge->dma_queue);

        return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
        int i;
        u32 serviced = 0;

        for (i = 0; i < 4; i++) {
                if (stat & CA91CX42_LINT_LM[i]) {
                        /* We only enable interrupts if the callback is set */
                        bridge->lm_callback[i](bridge->lm_data[i]);
                        serviced |= CA91CX42_LINT_LM[i];
                }
        }

        return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
        wake_up(&bridge->mbox_queue);

        return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
        wake_up(&bridge->iack_queue);

        return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
        int val;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        val = ioread32(bridge->base + DGCS);

        if (!(val & 0x00000800)) {
                dev_err(ca91cx42_bridge->parent,
                        "ca91cx42_VERR_irqhandler DMA Read Error DGCS=%08X\n",
                        val);
        }

        return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
        int val;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        val = ioread32(bridge->base + DGCS);

        if (!(val & 0x00000800))
                dev_err(ca91cx42_bridge->parent,
                        "ca91cx42_LERR_irqhandler DMA Read Error DGCS=%08X\n",
                        val);

        return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
        int stat)
{
        int vec, i, serviced = 0;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        for (i = 7; i > 0; i--) {
                if (stat & (1 << i)) {
                        vec = ioread32(bridge->base +
                                CA91CX42_V_STATID[i]) & 0xff;

                        vme_irq_handler(ca91cx42_bridge, i, vec);

                        serviced |= (1 << i);
                }
        }

        return serviced;
}

static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
        u32 stat, enable, serviced = 0;
        struct vme_bridge *ca91cx42_bridge;
        struct ca91cx42_driver *bridge;

        ca91cx42_bridge = ptr;

        bridge = ca91cx42_bridge->driver_priv;

        enable = ioread32(bridge->base + LINT_EN);
        stat = ioread32(bridge->base + LINT_STAT);

        /* Only look at unmasked interrupts */
        stat &= enable;

        if (unlikely(!stat))
                return IRQ_NONE;

        if (stat & CA91CX42_LINT_DMA)
                serviced |= ca91cx42_DMA_irqhandler(bridge);
        if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
                        CA91CX42_LINT_LM3))
                serviced |= ca91cx42_LM_irqhandler(bridge, stat);
        if (stat & CA91CX42_LINT_MBOX)
                serviced |= ca91cx42_MB_irqhandler(bridge, stat);
        if (stat & CA91CX42_LINT_SW_IACK)
                serviced |= ca91cx42_IACK_irqhandler(bridge);
        if (stat & CA91CX42_LINT_VERR)
                serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
        if (stat & CA91CX42_LINT_LERR)
                serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
        if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
                        CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
                        CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
                        CA91CX42_LINT_VIRQ7))
                serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

        /* Clear serviced interrupts */
        iowrite32(serviced, bridge->base + LINT_STAT);

        return IRQ_HANDLED;
}

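/*
 * Note on the handler above: LINT_STAT is masked with LINT_EN, so only
 * sources the driver has explicitly enabled are ever dispatched. Each
 * sub-handler returns the LINT_STAT bit(s) it serviced and the accumulated
 * mask is written back to LINT_STAT, acknowledging exactly those sources
 * while leaving anything that arrived mid-handler pending.
 */
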
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
        int result, tmp;
        struct pci_dev *pdev;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        /* Need pdev */
        pdev = to_pci_dev(ca91cx42_bridge->parent);

        /* Disable interrupts from PCI to VME */
        iowrite32(0, bridge->base + VINT_EN);

        /* Disable PCI interrupts */
        iowrite32(0, bridge->base + LINT_EN);
        /* Clear Any Pending PCI Interrupts */
        iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

        result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
                        driver_name, ca91cx42_bridge);
        if (result) {
                dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
                        pdev->irq);
                return result;
        }

        /* Ensure all interrupts are mapped to PCI Interrupt 0 */
        iowrite32(0, bridge->base + LINT_MAP0);
        iowrite32(0, bridge->base + LINT_MAP1);
        iowrite32(0, bridge->base + LINT_MAP2);

        /* Enable DMA, mailbox & LM Interrupts */
        tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
                CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
                CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

        iowrite32(tmp, bridge->base + LINT_EN);

        return 0;
}

static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
        struct pci_dev *pdev)
{
        struct vme_bridge *ca91cx42_bridge;

        /* Disable interrupts from PCI to VME */
        iowrite32(0, bridge->base + VINT_EN);

        /* Disable PCI interrupts */
        iowrite32(0, bridge->base + LINT_EN);
        /* Clear Any Pending PCI Interrupts */
        iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

        ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
                                       driver_priv);
        free_irq(pdev->irq, ca91cx42_bridge);
}

static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
{
        u32 tmp;

        tmp = ioread32(bridge->base + LINT_STAT);

        if (tmp & (1 << level))
                return 0;
        else
                return 1;
}

/*
 * Set up a VME interrupt
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
        int state, int sync)
{
        struct pci_dev *pdev;
        u32 tmp;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        /* Enable IRQ level */
        tmp = ioread32(bridge->base + LINT_EN);

        if (state == 0)
                tmp &= ~CA91CX42_LINT_VIRQ[level];
        else
                tmp |= CA91CX42_LINT_VIRQ[level];

        iowrite32(tmp, bridge->base + LINT_EN);

        if ((state == 0) && (sync != 0)) {
                pdev = to_pci_dev(ca91cx42_bridge->parent);

                synchronize_irq(pdev->irq);
        }
}

static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
        int statid)
{
        u32 tmp;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        /* Universe can only generate even vectors */
        if (statid & 1)
                return -EINVAL;

        mutex_lock(&bridge->vme_int);

        tmp = ioread32(bridge->base + VINT_EN);

        /* Set Status/ID */
        iowrite32(statid << 24, bridge->base + STATID);

        /* Assert VMEbus IRQ */
        tmp = tmp | (1 << (level + 24));
        iowrite32(tmp, bridge->base + VINT_EN);

        /* Wait for IACK */
        wait_event_interruptible(bridge->iack_queue,
                ca91cx42_iack_received(bridge, level));

        /* Return interrupt to low state */
        tmp = ioread32(bridge->base + VINT_EN);
        tmp = tmp & ~(1 << (level + 24));
        iowrite32(tmp, bridge->base + VINT_EN);

        mutex_unlock(&bridge->vme_int);

        return 0;
}

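/*
 * Example: generating a level 3 interrupt with an (even) status/ID of 0xaa
 * from a driver on this bus goes through the VME core and lands in
 * ca91cx42_irq_generate() above. A minimal sketch, assuming a valid
 * struct vme_dev *vdev bound to this bridge:
 *
 *	retval = vme_irq_generate(vdev, 3, 0xaa);
 *
 * The call blocks in wait_event_interruptible() until the handling master
 * acknowledges the interrupt, at which point ca91cx42_IACK_irqhandler()
 * wakes iack_queue and the IRQ line is dropped again.
 */
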
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
        unsigned long long vme_base, unsigned long long size,
        dma_addr_t pci_base, u32 aspace, u32 cycle)
{
        unsigned int i, addr = 0, granularity;
        unsigned int temp_ctl = 0;
        unsigned int vme_bound, pci_offset;
        struct vme_bridge *ca91cx42_bridge;
        struct ca91cx42_driver *bridge;

        ca91cx42_bridge = image->parent;

        bridge = ca91cx42_bridge->driver_priv;

        i = image->number;

        switch (aspace) {
        case VME_A16:
                addr |= CA91CX42_VSI_CTL_VAS_A16;
                break;
        case VME_A24:
                addr |= CA91CX42_VSI_CTL_VAS_A24;
                break;
        case VME_A32:
                addr |= CA91CX42_VSI_CTL_VAS_A32;
                break;
        case VME_USER1:
                addr |= CA91CX42_VSI_CTL_VAS_USER1;
                break;
        case VME_USER2:
                addr |= CA91CX42_VSI_CTL_VAS_USER2;
                break;
        default:
                dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
                return -EINVAL;
        }

        /*
         * Bound address is a valid address for the window, adjust
         * accordingly
         */
        vme_bound = vme_base + size;
        pci_offset = pci_base - vme_base;

        if ((i == 0) || (i == 4))
                granularity = 0x1000;
        else
                granularity = 0x10000;

        if (vme_base & (granularity - 1)) {
                dev_err(ca91cx42_bridge->parent,
                        "Invalid VME base alignment\n");
                return -EINVAL;
        }
        if (vme_bound & (granularity - 1)) {
                dev_err(ca91cx42_bridge->parent,
                        "Invalid VME bound alignment\n");
                return -EINVAL;
        }
        if (pci_offset & (granularity - 1)) {
                dev_err(ca91cx42_bridge->parent,
                        "Invalid PCI Offset alignment\n");
                return -EINVAL;
        }

        /* Disable while we are mucking around */
        temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
        temp_ctl &= ~CA91CX42_VSI_CTL_EN;
        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

        /* Setup mapping */
        iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
        iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
        iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

        /* Setup address space */
        temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
        temp_ctl |= addr;

        /* Setup cycle types */
        temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
        if (cycle & VME_SUPER)
                temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
        if (cycle & VME_USER)
                temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
        if (cycle & VME_PROG)
                temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
        if (cycle & VME_DATA)
                temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

        /* Write ctl reg without enable */
        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

        if (enabled)
                temp_ctl |= CA91CX42_VSI_CTL_EN;

        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

        return 0;
}

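/*
 * The granularity checks above mirror the hardware: slave images 0 and 4
 * decode on 4 KB boundaries while the remaining images use 64 KB, so the
 * base, bound and translation offset written to VSI_BS/BD/TO must all be
 * multiples of the image's granularity.
 */
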
static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
        unsigned long long *vme_base, unsigned long long *size,
        dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
        unsigned int i, granularity = 0, ctl = 0;
        unsigned long long vme_bound, pci_offset;
        struct ca91cx42_driver *bridge;

        bridge = image->parent->driver_priv;

        i = image->number;

        if ((i == 0) || (i == 4))
                granularity = 0x1000;
        else
                granularity = 0x10000;

        /* Read Registers */
        ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

        *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
        vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
        pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

        *pci_base = (dma_addr_t)*vme_base + pci_offset;
        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);

        *enabled = 0;
        *aspace = 0;
        *cycle = 0;

        if (ctl & CA91CX42_VSI_CTL_EN)
                *enabled = 1;

        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
                *aspace = VME_A16;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
                *aspace = VME_A24;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
                *aspace = VME_A32;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
                *aspace = VME_USER1;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
                *aspace = VME_USER2;

        if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
                *cycle |= VME_SUPER;
        if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
                *cycle |= VME_USER;
        if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
                *cycle |= VME_PROG;
        if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
                *cycle |= VME_DATA;

        return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
        unsigned long long size)
{
        unsigned long long existing_size;
        int retval = 0;
        struct pci_dev *pdev;
        struct vme_bridge *ca91cx42_bridge;

        ca91cx42_bridge = image->parent;

        /* Find pci_dev container of dev */
        if (ca91cx42_bridge->parent == NULL) {
                dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
                return -EINVAL;
        }
        pdev = to_pci_dev(ca91cx42_bridge->parent);

        existing_size = (unsigned long long)(image->bus_resource.end -
                image->bus_resource.start);

        /* If the existing size is OK, return */
        if (existing_size == (size - 1))
                return 0;

        if (existing_size != 0) {
                iounmap(image->kern_base);
                image->kern_base = NULL;
                kfree(image->bus_resource.name);
                release_resource(&image->bus_resource);
                memset(&image->bus_resource, 0, sizeof(struct resource));
        }

        if (image->bus_resource.name == NULL) {
                image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
                if (image->bus_resource.name == NULL) {
                        dev_err(ca91cx42_bridge->parent,
                                "Unable to allocate memory for resource name\n");
                        retval = -ENOMEM;
                        goto err_name;
                }
        }

        sprintf((char *)image->bus_resource.name, "%s.%d",
                ca91cx42_bridge->name, image->number);

        image->bus_resource.start = 0;
        image->bus_resource.end = (unsigned long)size;
        image->bus_resource.flags = IORESOURCE_MEM;

        retval = pci_bus_alloc_resource(pdev->bus,
                &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
                0, NULL, NULL);
        if (retval) {
                dev_err(ca91cx42_bridge->parent,
                        "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
                        image->number, (unsigned long)size,
                        (unsigned long)image->bus_resource.start);
                goto err_resource;
        }

        image->kern_base = ioremap_nocache(
                image->bus_resource.start, size);
        if (image->kern_base == NULL) {
                dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
                retval = -ENOMEM;
                goto err_remap;
        }

        return 0;

err_remap:
        release_resource(&image->bus_resource);
err_resource:
        kfree(image->bus_resource.name);
        memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
        return retval;
}

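/*
 * The logic above lets a master window be resized at runtime: if the PCI
 * window already allocated matches the requested size it is reused,
 * otherwise the old mapping is torn down and a fresh region is requested
 * from the parent PCI bus and remapped.
 */
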
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
        iounmap(image->kern_base);
        image->kern_base = NULL;
        release_resource(&image->bus_resource);
        kfree(image->bus_resource.name);
        memset(&image->bus_resource, 0, sizeof(struct resource));
}

static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
        unsigned long long vme_base, unsigned long long size, u32 aspace,
        u32 cycle, u32 dwidth)
{
        int retval = 0;
        unsigned int i, granularity = 0;
        unsigned int temp_ctl = 0;
        unsigned long long pci_bound, vme_offset, pci_base;
        struct vme_bridge *ca91cx42_bridge;
        struct ca91cx42_driver *bridge;

        ca91cx42_bridge = image->parent;

        bridge = ca91cx42_bridge->driver_priv;

        i = image->number;

        if ((i == 0) || (i == 4))
                granularity = 0x1000;
        else
                granularity = 0x10000;

        /* Verify input data */
        if (vme_base & (granularity - 1)) {
                dev_err(ca91cx42_bridge->parent,
                        "Invalid VME Window alignment\n");
                retval = -EINVAL;
                goto err_window;
        }
        if (size & (granularity - 1)) {
                dev_err(ca91cx42_bridge->parent,
                        "Invalid VME Window alignment\n");
                retval = -EINVAL;
                goto err_window;
        }

        spin_lock(&image->lock);

        /*
         * Let's allocate the resource here rather than further up the stack as
         * it avoids pushing loads of bus dependent stuff up the stack
         */
        retval = ca91cx42_alloc_resource(image, size);
        if (retval) {
                spin_unlock(&image->lock);
                dev_err(ca91cx42_bridge->parent,
                        "Unable to allocate memory for resource name\n");
                retval = -ENOMEM;
                goto err_res;
        }

        pci_base = (unsigned long long)image->bus_resource.start;

        /*
         * Bound address is a valid address for the window, adjust
         * according to window granularity.
         */
        pci_bound = pci_base + size;
        vme_offset = vme_base - pci_base;

        /* Disable while we are mucking around */
        temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
        temp_ctl &= ~CA91CX42_LSI_CTL_EN;
        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

        /* Setup cycle types */
        temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
        if (cycle & VME_BLT)
                temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
        if (cycle & VME_MBLT)
                temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

        /* Setup data width */
        temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
        switch (dwidth) {
        case VME_D8:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
                break;
        case VME_D16:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
                break;
        case VME_D32:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
                break;
        case VME_D64:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
                break;
        default:
                spin_unlock(&image->lock);
                dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
                retval = -EINVAL;
                goto err_dwidth;
        }

        /* Setup address space */
        temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
        switch (aspace) {
        case VME_A16:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
                break;
        case VME_A24:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
                break;
        case VME_A32:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
                break;
        case VME_CRCSR:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
                break;
        case VME_USER1:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
                break;
        case VME_USER2:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
                break;
        default:
                spin_unlock(&image->lock);
                dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
                retval = -EINVAL;
                goto err_aspace;
        }

        temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
        if (cycle & VME_SUPER)
                temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
        if (cycle & VME_PROG)
                temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

        /* Setup mapping */
        iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
        iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
        iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

        /* Write ctl reg without enable */
        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

        if (enabled)
                temp_ctl |= CA91CX42_LSI_CTL_EN;

        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

        spin_unlock(&image->lock);

        return 0;

err_aspace:
err_dwidth:
        ca91cx42_free_resource(image);
err_res:
err_window:
        return retval;
}

static int __ca91cx42_master_get(struct vme_master_resource *image,
        int *enabled, unsigned long long *vme_base, unsigned long long *size,
        u32 *aspace, u32 *cycle, u32 *dwidth)
{
        unsigned int i, ctl;
        unsigned long long pci_base, pci_bound, vme_offset;
        struct ca91cx42_driver *bridge;

        bridge = image->parent->driver_priv;

        i = image->number;

        ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

        pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
        vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
        pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

        *vme_base = pci_base + vme_offset;
        *size = (unsigned long long)(pci_bound - pci_base);

        *enabled = 0;
        *aspace = 0;
        *cycle = 0;
        *dwidth = 0;

        if (ctl & CA91CX42_LSI_CTL_EN)
                *enabled = 1;

        /* Setup address space */
        switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
        case CA91CX42_LSI_CTL_VAS_A16:
                *aspace = VME_A16;
                break;
        case CA91CX42_LSI_CTL_VAS_A24:
                *aspace = VME_A24;
                break;
        case CA91CX42_LSI_CTL_VAS_A32:
                *aspace = VME_A32;
                break;
        case CA91CX42_LSI_CTL_VAS_CRCSR:
                *aspace = VME_CRCSR;
                break;
        case CA91CX42_LSI_CTL_VAS_USER1:
                *aspace = VME_USER1;
                break;
        case CA91CX42_LSI_CTL_VAS_USER2:
                *aspace = VME_USER2;
                break;
        }

        /* XXX Not sure how to check for MBLT */
        /* Setup cycle types */
        if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
                *cycle |= VME_BLT;
        else
                *cycle |= VME_SCT;

        if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
                *cycle |= VME_SUPER;
        else
                *cycle |= VME_USER;

        if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
                *cycle |= VME_PROG;
        else
                *cycle |= VME_DATA;

        /* Setup data width */
        switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
        case CA91CX42_LSI_CTL_VDW_D8:
                *dwidth = VME_D8;
                break;
        case CA91CX42_LSI_CTL_VDW_D16:
                *dwidth = VME_D16;
                break;
        case CA91CX42_LSI_CTL_VDW_D32:
                *dwidth = VME_D32;
                break;
        case CA91CX42_LSI_CTL_VDW_D64:
                *dwidth = VME_D64;
                break;
        }

        return 0;
}

static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
        unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
        u32 *cycle, u32 *dwidth)
{
        int retval;

        spin_lock(&image->lock);

        retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
                cycle, dwidth);

        spin_unlock(&image->lock);

        return retval;
}

static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
        void *buf, size_t count, loff_t offset)
{
        ssize_t retval;
        void __iomem *addr = image->kern_base + offset;
        unsigned int done = 0;
        unsigned int count32;

        if (count == 0)
                return 0;

        spin_lock(&image->lock);

        /* The following code handles VME address alignment. We cannot use
         * memcpy_xxx here because it may cut data transfers into 8-bit
         * cycles when D16 or D32 cycles are required on the VME bus.
         * On the other hand, the bridge itself assures that the maximum data
         * cycle configured for the transfer is used and splits it
         * automatically for non-aligned addresses, so we don't want the
         * overhead of needlessly forcing small transfers for the entire cycle.
         */
        if ((uintptr_t)addr & 0x1) {
                *(u8 *)buf = ioread8(addr);
                done += 1;
                if (done == count)
                        goto out;
        }
        if ((uintptr_t)(addr + done) & 0x2) {
                if ((count - done) < 2) {
                        *(u8 *)(buf + done) = ioread8(addr + done);
                        done += 1;
                        goto out;
                } else {
                        *(u16 *)(buf + done) = ioread16(addr + done);
                        done += 2;
                }
        }

        count32 = (count - done) & ~0x3;
        while (done < count32) {
                *(u32 *)(buf + done) = ioread32(addr + done);
                done += 4;
        }

        if ((count - done) & 0x2) {
                *(u16 *)(buf + done) = ioread16(addr + done);
                done += 2;
        }
        if ((count - done) & 0x1) {
                *(u8 *)(buf + done) = ioread8(addr + done);
                done += 1;
        }
out:
        retval = count;

        spin_unlock(&image->lock);

        return retval;
}

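/*
 * Worked example of the alignment handling above, assuming a read of
 * count = 7 from an odd address: one ioread8() aligns the cursor to 2
 * bytes, one ioread16() aligns it to 4 bytes, and a single ioread32()
 * moves the rest; any 1-3 trailing bytes would drain through the final
 * ioread16()/ioread8() steps. Every access is therefore issued at the
 * widest naturally aligned size available.
 */
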
static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
        void *buf, size_t count, loff_t offset)
{
        ssize_t retval;
        void __iomem *addr = image->kern_base + offset;
        unsigned int done = 0;
        unsigned int count32;

        if (count == 0)
                return 0;

        spin_lock(&image->lock);

        /* Here we apply the same strategy as in master_read to ensure the
         * correct cycles are used.
         */
        if ((uintptr_t)addr & 0x1) {
                iowrite8(*(u8 *)buf, addr);
                done += 1;
                if (done == count)
                        goto out;
        }
        if ((uintptr_t)(addr + done) & 0x2) {
                if ((count - done) < 2) {
                        iowrite8(*(u8 *)(buf + done), addr + done);
                        done += 1;
                        goto out;
                } else {
                        iowrite16(*(u16 *)(buf + done), addr + done);
                        done += 2;
                }
        }

        count32 = (count - done) & ~0x3;
        while (done < count32) {
                iowrite32(*(u32 *)(buf + done), addr + done);
                done += 4;
        }

        if ((count - done) & 0x2) {
                iowrite16(*(u16 *)(buf + done), addr + done);
                done += 2;
        }
        if ((count - done) & 0x1) {
                iowrite8(*(u8 *)(buf + done), addr + done);
                done += 1;
        }
out:
        retval = count;

        spin_unlock(&image->lock);

        return retval;
}

static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
        unsigned int mask, unsigned int compare, unsigned int swap,
        loff_t offset)
{
        u32 result;
        uintptr_t pci_addr;
        struct ca91cx42_driver *bridge;
        struct device *dev;

        bridge = image->parent->driver_priv;
        dev = image->parent->parent;

        /* Find the PCI address that maps to the desired VME address */

        /* Locking as we can only do one of these at a time */
        mutex_lock(&bridge->vme_rmw);

        /* Lock image */
        spin_lock(&image->lock);

        pci_addr = (uintptr_t)image->kern_base + offset;

        /* Address must be 4-byte aligned */
        if (pci_addr & 0x3) {
                dev_err(dev, "RMW Address not 4-byte aligned\n");
                result = -EINVAL;
                goto out;
        }

        /* Ensure RMW Disabled whilst configuring */
        iowrite32(0, bridge->base + SCYC_CTL);

        /* Configure registers */
        iowrite32(mask, bridge->base + SCYC_EN);
        iowrite32(compare, bridge->base + SCYC_CMP);
        iowrite32(swap, bridge->base + SCYC_SWP);
        iowrite32(pci_addr, bridge->base + SCYC_ADDR);

        /* Enable RMW */
        iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

        /* Kick process off with a read to the required address. */
        result = ioread32(image->kern_base + offset);

        /* Disable RMW */
        iowrite32(0, bridge->base + SCYC_CTL);

out:
        spin_unlock(&image->lock);

        mutex_unlock(&bridge->vme_rmw);

        return result;
}

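/*
 * A minimal usage sketch, assuming a struct vme_resource *res obtained
 * from vme_master_request() and configured with vme_master_set():
 * atomically set bit 0 of the word at offset 0x10 within the window:
 *
 *	result = vme_master_rmw(res, 0x1, 0x0, 0x1, 0x10);
 *
 * The VME core routes this to ca91cx42_master_rmw() above; the special
 * cycle generator performs the bus-locked read-modify-write when the
 * triggering ioread32() hits the mapped window.
 */
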
static int ca91cx42_dma_list_add(struct vme_dma_list *list,
        struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
        struct ca91cx42_dma_entry *entry, *prev;
        struct vme_dma_pci *pci_attr;
        struct vme_dma_vme *vme_attr;
        dma_addr_t desc_ptr;
        int retval = 0;
        struct device *dev;

        dev = list->parent->parent->parent;

        /* XXX descriptor must be aligned on 64-bit boundaries */
        entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
        if (entry == NULL) {
                dev_err(dev,
                        "Failed to allocate memory for dma resource structure\n");
                retval = -ENOMEM;
                goto err_mem;
        }

        /* Test descriptor alignment */
        if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
                dev_err(dev,
                        "Descriptor not aligned to 16 byte boundary as required: %p\n",
                        &entry->descriptor);
                retval = -EINVAL;
                goto err_align;
        }

        memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));

        if (dest->type == VME_DMA_VME) {
                entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
                vme_attr = dest->private;
                pci_attr = src->private;
        } else {
                vme_attr = src->private;
                pci_attr = dest->private;
        }

        /* Check that we can fulfill the required attributes */
        if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
                VME_USER2)) != 0) {

                dev_err(dev, "Unsupported address space\n");
                retval = -EINVAL;
                goto err_aspace;
        }

        if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
                VME_PROG | VME_DATA)) != 0) {

                dev_err(dev, "Unsupported cycle type\n");
                retval = -EINVAL;
                goto err_cycle;
        }

        /* Check to see if we can fulfill source and destination */
        if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
                ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {

                dev_err(dev,
                        "Cannot perform transfer with this source-destination combination\n");
                retval = -EINVAL;
                goto err_direct;
        }

        /* Setup cycle types */
        if (vme_attr->cycle & VME_BLT)
                entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

        /* Setup data width */
        switch (vme_attr->dwidth) {
        case VME_D8:
                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
                break;
        case VME_D16:
                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
                break;
        case VME_D32:
                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
                break;
        case VME_D64:
                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
                break;
        default:
                dev_err(dev, "Invalid data width\n");
                retval = -EINVAL;
                goto err_dwidth;
        }

        /* Setup address space */
        switch (vme_attr->aspace) {
        case VME_A16:
                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
                break;
        case VME_A24:
                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
                break;
        case VME_A32:
                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
                break;
        case VME_USER1:
                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
                break;
        case VME_USER2:
                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
                break;
        default:
                dev_err(dev, "Invalid address space\n");
                retval = -EINVAL;
                goto err_aspace;
        }

        if (vme_attr->cycle & VME_SUPER)
                entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
        if (vme_attr->cycle & VME_PROG)
                entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

        entry->descriptor.dtbc = count;
        entry->descriptor.dla = pci_attr->address;
        entry->descriptor.dva = vme_attr->address;
        entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

        /* Add to list */
        list_add_tail(&entry->list, &list->entries);

        /* Fill out previous descriptors "Next Address" */
        if (entry->list.prev != &list->entries) {
                prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
                        list);
                /* We need the bus address for the pointer */
                desc_ptr = virt_to_bus(&entry->descriptor);
                prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
        }

        return 0;

err_dwidth:
err_aspace:
err_direct:
err_cycle:
err_align:
        kfree(entry);
err_mem:
        return retval;
}

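/*
 * Chaining note: every entry added above carries its own hardware
 * descriptor, initially terminated with CA91CX42_DCPP_NULL. When a further
 * entry is appended, the previous descriptor's dcpp "next" pointer is
 * patched with the bus address of the new descriptor, so the Universe can
 * walk the whole list in a single chained DMA operation.
 */
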
static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
        u32 tmp;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        tmp = ioread32(bridge->base + DGCS);

        if (tmp & CA91CX42_DGCS_ACT)
                return 0;
        else
                return 1;
}

static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
        struct vme_dma_resource *ctrlr;
        struct ca91cx42_dma_entry *entry;
        int retval;
        dma_addr_t bus_addr;
        u32 val;
        struct device *dev;
        struct ca91cx42_driver *bridge;

        ctrlr = list->parent;

        bridge = ctrlr->parent->driver_priv;
        dev = ctrlr->parent->parent;

        mutex_lock(&ctrlr->mtx);

        if (!(list_empty(&ctrlr->running))) {
                /*
                 * XXX We have an active DMA transfer and currently haven't
                 *     sorted out the mechanism for "pending" DMA transfers.
                 *     Return busy.
                 */
                /* Need to add to pending here */
                mutex_unlock(&ctrlr->mtx);
                return -EBUSY;
        } else {
                list_add(&list->list, &ctrlr->running);
        }

        /* Get first bus address and write into registers */
        entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
                list);

        bus_addr = virt_to_bus(&entry->descriptor);

        mutex_unlock(&ctrlr->mtx);

        iowrite32(0, bridge->base + DTBC);
        iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

        /* Start the operation */
        val = ioread32(bridge->base + DGCS);

        /* XXX Could set VMEbus On and Off Counters here */
        val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

        val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
                CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
                CA91CX42_DGCS_PERR);

        iowrite32(val, bridge->base + DGCS);

        val |= CA91CX42_DGCS_GO;

        iowrite32(val, bridge->base + DGCS);

        retval = wait_event_interruptible(bridge->dma_queue,
                ca91cx42_dma_busy(ctrlr->parent));

        if (retval) {
                val = ioread32(bridge->base + DGCS);
                iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
                /* Wait for the operation to abort */
                wait_event(bridge->dma_queue,
                        ca91cx42_dma_busy(ctrlr->parent));
                retval = -EINTR;
                goto exit;
        }

        /*
         * Read status register, this register is valid until we kick off a
         * new transfer.
         */
        val = ioread32(bridge->base + DGCS);

        if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
                CA91CX42_DGCS_PERR)) {

                dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
                val = ioread32(bridge->base + DCTL);
                retval = -EIO;
        }

exit:
        /* Remove list from running list */
        mutex_lock(&ctrlr->mtx);
        list_del(&list->list);
        mutex_unlock(&ctrlr->mtx);

        return retval;
}

static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
        struct list_head *pos, *temp;
        struct ca91cx42_dma_entry *entry;

        /* detach and free each entry */
        list_for_each_safe(pos, temp, &list->entries) {
                list_del(pos);
                entry = list_entry(pos, struct ca91cx42_dma_entry, list);
                kfree(entry);
        }

        return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 */
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
        unsigned long long lm_base, u32 aspace, u32 cycle)
{
        u32 temp_base, lm_ctl = 0;
        int i;
        struct ca91cx42_driver *bridge;
        struct device *dev;

        bridge = lm->parent->driver_priv;
        dev = lm->parent->parent;

        /* Check the alignment of the location monitor */
        temp_base = (u32)lm_base;
        if (temp_base & 0xffff) {
                dev_err(dev,
                        "Location monitor must be aligned to 64KB boundary\n");
                return -EINVAL;
        }

        mutex_lock(&lm->mtx);

        /* If we already have a callback attached, we can't move it! */
        for (i = 0; i < lm->monitors; i++) {
                if (bridge->lm_callback[i] != NULL) {
                        mutex_unlock(&lm->mtx);
                        dev_err(dev,
                                "Location monitor callback attached, can't reset\n");
                        return -EBUSY;
                }
        }

        switch (aspace) {
        case VME_A16:
                lm_ctl |= CA91CX42_LM_CTL_AS_A16;
                break;
        case VME_A24:
                lm_ctl |= CA91CX42_LM_CTL_AS_A24;
                break;
        case VME_A32:
                lm_ctl |= CA91CX42_LM_CTL_AS_A32;
                break;
        default:
                mutex_unlock(&lm->mtx);
                dev_err(dev, "Invalid address space\n");
                return -EINVAL;
        }

        if (cycle & VME_SUPER)
                lm_ctl |= CA91CX42_LM_CTL_SUPR;
        if (cycle & VME_USER)
                lm_ctl |= CA91CX42_LM_CTL_NPRIV;
        if (cycle & VME_PROG)
                lm_ctl |= CA91CX42_LM_CTL_PGM;
        if (cycle & VME_DATA)
                lm_ctl |= CA91CX42_LM_CTL_DATA;

        iowrite32(lm_base, bridge->base + LM_BS);
        iowrite32(lm_ctl, bridge->base + LM_CTL);

        mutex_unlock(&lm->mtx);

        return 0;
}

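/*
 * Because all four location monitors share the single LM_BS/LM_CTL
 * register pair, ca91cx42_lm_set() refuses to reprogram the base while any
 * callback is attached - doing so would silently retarget monitors that
 * other users still rely on.
 */
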
/* Get configuration of the callback monitor and return whether it is enabled
 * or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
        unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
        u32 lm_ctl, enabled = 0;
        struct ca91cx42_driver *bridge;

        bridge = lm->parent->driver_priv;

        mutex_lock(&lm->mtx);

        *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
        lm_ctl = ioread32(bridge->base + LM_CTL);

        if (lm_ctl & CA91CX42_LM_CTL_EN)
                enabled = 1;

        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
                *aspace = VME_A16;
        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
                *aspace = VME_A24;
        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
                *aspace = VME_A32;

        *cycle = 0;
        if (lm_ctl & CA91CX42_LM_CTL_SUPR)
                *cycle |= VME_SUPER;
        if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
                *cycle |= VME_USER;
        if (lm_ctl & CA91CX42_LM_CTL_PGM)
                *cycle |= VME_PROG;
        if (lm_ctl & CA91CX42_LM_CTL_DATA)
                *cycle |= VME_DATA;

        mutex_unlock(&lm->mtx);

        return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
        void (*callback)(void *), void *data)
{
        u32 lm_ctl, tmp;
        struct ca91cx42_driver *bridge;
        struct device *dev;

        bridge = lm->parent->driver_priv;
        dev = lm->parent->parent;

        mutex_lock(&lm->mtx);

        /* Ensure that the location monitor is configured - need PGM or DATA */
        lm_ctl = ioread32(bridge->base + LM_CTL);
        if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
                mutex_unlock(&lm->mtx);
                dev_err(dev, "Location monitor not properly configured\n");
                return -EINVAL;
        }

        /* Check that a callback isn't already attached */
        if (bridge->lm_callback[monitor] != NULL) {
                mutex_unlock(&lm->mtx);
                dev_err(dev, "Existing callback attached\n");
                return -EBUSY;
        }

        /* Attach callback */
        bridge->lm_callback[monitor] = callback;
        bridge->lm_data[monitor] = data;

        /* Enable Location Monitor interrupt */
        tmp = ioread32(bridge->base + LINT_EN);
        tmp |= CA91CX42_LINT_LM[monitor];
        iowrite32(tmp, bridge->base + LINT_EN);

        /* Ensure that the global Location Monitor Enable is set */
        if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
                lm_ctl |= CA91CX42_LM_CTL_EN;
                iowrite32(lm_ctl, bridge->base + LM_CTL);
        }

        mutex_unlock(&lm->mtx);

        return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
        u32 tmp;
        struct ca91cx42_driver *bridge;

        bridge = lm->parent->driver_priv;

        mutex_lock(&lm->mtx);

        /* Disable Location Monitor and ensure previous interrupts are clear */
        tmp = ioread32(bridge->base + LINT_EN);
        tmp &= ~CA91CX42_LINT_LM[monitor];
        iowrite32(tmp, bridge->base + LINT_EN);

        iowrite32(CA91CX42_LINT_LM[monitor],
                  bridge->base + LINT_STAT);

        /* Detach callback */
        bridge->lm_callback[monitor] = NULL;
        bridge->lm_data[monitor] = NULL;

        /* If all location monitors disabled, disable global Location Monitor */
        if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
                        CA91CX42_LINT_LM3)) == 0) {
                tmp = ioread32(bridge->base + LM_CTL);
                tmp &= ~CA91CX42_LM_CTL_EN;
                iowrite32(tmp, bridge->base + LM_CTL);
        }

        mutex_unlock(&lm->mtx);

        return 0;
}

static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
        u32 slot = 0;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        if (!geoid) {
                slot = ioread32(bridge->base + VCSR_BS);
                slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
        } else
                slot = geoid;

        return (int)slot;
}

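/*
 * The slot number normally comes from the Auto-ID/geographic address
 * latched into VCSR_BS at power-up; setting the "geoid" module parameter
 * overrides it, e.g. for backplanes without geographic addressing.
 */
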
static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
        dma_addr_t *dma)
{
        struct pci_dev *pdev;

        /* Find pci_dev container of dev */
        pdev = to_pci_dev(parent);

        return pci_alloc_consistent(pdev, size, dma);
}

static void ca91cx42_free_consistent(struct device *parent, size_t size,
        void *vaddr, dma_addr_t dma)
{
        struct pci_dev *pdev;

        /* Find pci_dev container of dev */
        pdev = to_pci_dev(parent);

        pci_free_consistent(pdev, size, vaddr, dma);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
        struct pci_dev *pdev)
{
        unsigned int crcsr_addr;
        int tmp, slot;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        slot = ca91cx42_slot_get(ca91cx42_bridge);

        /* Write CSR Base Address if slot ID is supplied as a module param */
        if (geoid)
                iowrite32(geoid << 27, bridge->base + VCSR_BS);

        dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
        if (slot == 0) {
                dev_err(&pdev->dev,
                        "Slot number is unset, not configuring CR/CSR space\n");
                return -EINVAL;
        }

        /* Allocate mem for CR/CSR image */
        bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
                                                     &bridge->crcsr_bus);
        if (bridge->crcsr_kernel == NULL) {
                dev_err(&pdev->dev,
                        "Failed to allocate memory for CR/CSR image\n");
                return -ENOMEM;
        }

        crcsr_addr = slot * (512 * 1024);
        iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

        tmp = ioread32(bridge->base + VCSR_CTL);
        tmp |= CA91CX42_VCSR_CTL_EN;
        iowrite32(tmp, bridge->base + VCSR_CTL);

        return 0;
}

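/*
 * CR/CSR arithmetic used above: the CR/CSR address space allots each slot
 * a 512 KB image, so slot n's image starts at n * 512 * 1024 (slot 2 at
 * 0x100000, for example). VCSR_TO is programmed with the difference
 * between the local buffer's bus address and that offset, translating
 * incoming CR/CSR accesses onto the allocated PCI buffer.
 */
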
static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
        struct pci_dev *pdev)
{
        u32 tmp;
        struct ca91cx42_driver *bridge;

        bridge = ca91cx42_bridge->driver_priv;

        /* Turn off CR/CSR space */
        tmp = ioread32(bridge->base + VCSR_CTL);
        tmp &= ~CA91CX42_VCSR_CTL_EN;
        iowrite32(tmp, bridge->base + VCSR_CTL);

        /* Free image */
        iowrite32(0, bridge->base + VCSR_TO);

        pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
                            bridge->crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int retval, i;
        u32 data;
        struct list_head *pos = NULL, *n;
        struct vme_bridge *ca91cx42_bridge;
        struct ca91cx42_driver *ca91cx42_device;
        struct vme_master_resource *master_image;
        struct vme_slave_resource *slave_image;
        struct vme_dma_resource *dma_ctrlr;
        struct vme_lm_resource *lm;

        /* We want to support more than one of each bridge so we need to
         * dynamically allocate the bridge structure
         */
        ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
        if (ca91cx42_bridge == NULL) {
                dev_err(&pdev->dev,
                        "Failed to allocate memory for device structure\n");
                retval = -ENOMEM;
                goto err_struct;
        }
        vme_init_bridge(ca91cx42_bridge);

        ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
        if (ca91cx42_device == NULL) {
                dev_err(&pdev->dev,
                        "Failed to allocate memory for device structure\n");
                retval = -ENOMEM;
                goto err_driver;
        }

        ca91cx42_bridge->driver_priv = ca91cx42_device;

        /* Enable the device */
        retval = pci_enable_device(pdev);
        if (retval) {
                dev_err(&pdev->dev, "Unable to enable device\n");
                goto err_enable;
        }

        /* Map Registers */
        retval = pci_request_regions(pdev, driver_name);
        if (retval) {
                dev_err(&pdev->dev, "Unable to reserve resources\n");
                goto err_resource;
        }

        /* map registers in BAR 0 */
        ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
                4096);
        if (!ca91cx42_device->base) {
                dev_err(&pdev->dev, "Unable to remap CRG region\n");
                retval = -EIO;
                goto err_remap;
        }

        /* Check to see if the mapping worked out */
        data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
        if (data != PCI_VENDOR_ID_TUNDRA) {
                dev_err(&pdev->dev, "PCI_ID check failed\n");
                retval = -EIO;
                goto err_test;
        }

        /* Initialize wait queues & mutual exclusion flags */
        init_waitqueue_head(&ca91cx42_device->dma_queue);
        init_waitqueue_head(&ca91cx42_device->iack_queue);
        mutex_init(&ca91cx42_device->vme_int);
        mutex_init(&ca91cx42_device->vme_rmw);

        ca91cx42_bridge->parent = &pdev->dev;
        strcpy(ca91cx42_bridge->name, driver_name);

        /* Setup IRQ */
        retval = ca91cx42_irq_init(ca91cx42_bridge);
        if (retval != 0) {
                dev_err(&pdev->dev, "Chip Initialization failed.\n");
                goto err_irq;
        }

        /* Add master windows to list */
        for (i = 0; i < CA91C142_MAX_MASTER; i++) {
                master_image = kmalloc(sizeof(struct vme_master_resource),
                        GFP_KERNEL);
                if (master_image == NULL) {
                        dev_err(&pdev->dev,
                                "Failed to allocate memory for master resource structure\n");
                        retval = -ENOMEM;
                        goto err_master;
                }
                master_image->parent = ca91cx42_bridge;
                spin_lock_init(&master_image->lock);
                master_image->locked = 0;
                master_image->number = i;
                master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
                        VME_CRCSR | VME_USER1 | VME_USER2;
                master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
                master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
                memset(&master_image->bus_resource, 0,
                        sizeof(struct resource));
                master_image->kern_base = NULL;
                list_add_tail(&master_image->list,
                        &ca91cx42_bridge->master_resources);
        }

        /* Add slave windows to list */
        for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
                slave_image = kmalloc(sizeof(struct vme_slave_resource),
                        GFP_KERNEL);
                if (slave_image == NULL) {
                        dev_err(&pdev->dev,
                                "Failed to allocate memory for slave resource structure\n");
                        retval = -ENOMEM;
                        goto err_slave;
                }
                slave_image->parent = ca91cx42_bridge;
                mutex_init(&slave_image->mtx);
                slave_image->locked = 0;
                slave_image->number = i;
                slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
                        VME_USER2;

                /* Only windows 0 and 4 support A16 */
                if (i == 0 || i == 4)
                        slave_image->address_attr |= VME_A16;

                slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
                list_add_tail(&slave_image->list,
                        &ca91cx42_bridge->slave_resources);
        }

        /* Add dma engines to list */
        for (i = 0; i < CA91C142_MAX_DMA; i++) {
                dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
                        GFP_KERNEL);
                if (dma_ctrlr == NULL) {
                        dev_err(&pdev->dev,
                                "Failed to allocate memory for dma resource structure\n");
                        retval = -ENOMEM;
                        goto err_dma;
                }
                dma_ctrlr->parent = ca91cx42_bridge;
                mutex_init(&dma_ctrlr->mtx);
                dma_ctrlr->locked = 0;
                dma_ctrlr->number = i;
                dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
                        VME_DMA_MEM_TO_VME;
                INIT_LIST_HEAD(&dma_ctrlr->pending);
                INIT_LIST_HEAD(&dma_ctrlr->running);
                list_add_tail(&dma_ctrlr->list,
                        &ca91cx42_bridge->dma_resources);
        }

        /* Add location monitor to list */
        lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
        if (lm == NULL) {
                dev_err(&pdev->dev,
                        "Failed to allocate memory for location monitor resource structure\n");
                retval = -ENOMEM;
                goto err_lm;
        }
        lm->parent = ca91cx42_bridge;
        mutex_init(&lm->mtx);
        lm->locked = 0;
        lm->number = 1;
        lm->monitors = 4;
        list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

        ca91cx42_bridge->slave_get = ca91cx42_slave_get;
        ca91cx42_bridge->slave_set = ca91cx42_slave_set;
        ca91cx42_bridge->master_get = ca91cx42_master_get;
        ca91cx42_bridge->master_set = ca91cx42_master_set;
        ca91cx42_bridge->master_read = ca91cx42_master_read;
        ca91cx42_bridge->master_write = ca91cx42_master_write;
        ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
        ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
        ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
        ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
        ca91cx42_bridge->irq_set = ca91cx42_irq_set;
        ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
        ca91cx42_bridge->lm_set = ca91cx42_lm_set;
        ca91cx42_bridge->lm_get = ca91cx42_lm_get;
        ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
        ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
        ca91cx42_bridge->slot_get = ca91cx42_slot_get;
        ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
        ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

        data = ioread32(ca91cx42_device->base + MISC_CTL);
        dev_info(&pdev->dev, "Board is%s the VME system controller\n",
                (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
        dev_info(&pdev->dev, "Slot ID is %d\n",
                ca91cx42_slot_get(ca91cx42_bridge));

        if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
                dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

        /* Need to save ca91cx42_bridge pointer locally in link list for use in
         * ca91cx42_remove()
         */
        retval = vme_register_bridge(ca91cx42_bridge);
        if (retval != 0) {
                dev_err(&pdev->dev, "Chip Registration failed.\n");
                goto err_reg;
        }

        pci_set_drvdata(pdev, ca91cx42_bridge);

        return 0;

err_reg:
        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
                lm = list_entry(pos, struct vme_lm_resource, list);
                list_del(pos);
                kfree(lm);
        }
err_dma:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
                list_del(pos);
                kfree(dma_ctrlr);
        }
err_slave:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
                slave_image = list_entry(pos, struct vme_slave_resource, list);
                list_del(pos);
                kfree(slave_image);
        }
err_master:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
                master_image = list_entry(pos, struct vme_master_resource,
                        list);
                list_del(pos);
                kfree(master_image);
        }

        ca91cx42_irq_exit(ca91cx42_device, pdev);
err_irq:
err_test:
        iounmap(ca91cx42_device->base);
err_remap:
        pci_release_regions(pdev);
err_resource:
        pci_disable_device(pdev);
err_enable:
        kfree(ca91cx42_device);
err_driver:
        kfree(ca91cx42_bridge);
err_struct:
        return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
        struct list_head *pos = NULL, *n;
        struct vme_master_resource *master_image;
        struct vme_slave_resource *slave_image;
        struct vme_dma_resource *dma_ctrlr;
        struct vme_lm_resource *lm;
        struct ca91cx42_driver *bridge;
        struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

        bridge = ca91cx42_bridge->driver_priv;

        /* Turn off Ints */
        iowrite32(0, bridge->base + LINT_EN);

        /* Turn off the windows */
        iowrite32(0x00800000, bridge->base + LSI0_CTL);
        iowrite32(0x00800000, bridge->base + LSI1_CTL);
        iowrite32(0x00800000, bridge->base + LSI2_CTL);
        iowrite32(0x00800000, bridge->base + LSI3_CTL);
        iowrite32(0x00800000, bridge->base + LSI4_CTL);
        iowrite32(0x00800000, bridge->base + LSI5_CTL);
        iowrite32(0x00800000, bridge->base + LSI6_CTL);
        iowrite32(0x00800000, bridge->base + LSI7_CTL);
        iowrite32(0x00F00000, bridge->base + VSI0_CTL);
        iowrite32(0x00F00000, bridge->base + VSI1_CTL);
        iowrite32(0x00F00000, bridge->base + VSI2_CTL);
        iowrite32(0x00F00000, bridge->base + VSI3_CTL);
        iowrite32(0x00F00000, bridge->base + VSI4_CTL);
        iowrite32(0x00F00000, bridge->base + VSI5_CTL);
        iowrite32(0x00F00000, bridge->base + VSI6_CTL);
        iowrite32(0x00F00000, bridge->base + VSI7_CTL);

        vme_unregister_bridge(ca91cx42_bridge);

        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
                lm = list_entry(pos, struct vme_lm_resource, list);
                list_del(pos);
                kfree(lm);
        }

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
                list_del(pos);
                kfree(dma_ctrlr);
        }

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
                slave_image = list_entry(pos, struct vme_slave_resource, list);
                list_del(pos);
                kfree(slave_image);
        }

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
                master_image = list_entry(pos, struct vme_master_resource,
                        list);
                list_del(pos);
                kfree(master_image);
        }

        ca91cx42_irq_exit(bridge, pdev);

        iounmap(bridge->base);

        pci_release_regions(pdev);

        pci_disable_device(pdev);

        kfree(ca91cx42_bridge);
}

module_pci_driver(ca91cx42_driver);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");