1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
5 * Author: Martyn Welch <martyn.welch@ge.com>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
11 * Derived from ca91c042.c by Michael Wyrick
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/types.h>
17 #include <linux/errno.h>
18 #include <linux/pci.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/poll.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/vme.h>
30 #include "../vme_bridge.h"
31 #include "vme_ca91cx42.h"
33 static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
34 static void ca91cx42_remove(struct pci_dev *);
36 /* Module parameters */
37 static int geoid;
39 static const char driver_name[] = "vme_ca91cx42";
41 static const struct pci_device_id ca91cx42_ids[] = {
42 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
43 { },
46 MODULE_DEVICE_TABLE(pci, ca91cx42_ids);
48 static struct pci_driver ca91cx42_driver = {
49 .name = driver_name,
50 .id_table = ca91cx42_ids,
51 .probe = ca91cx42_probe,
52 .remove = ca91cx42_remove,
55 static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
57 wake_up(&bridge->dma_queue);
59 return CA91CX42_LINT_DMA;
62 static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
64 int i;
65 u32 serviced = 0;
67 for (i = 0; i < 4; i++) {
68 if (stat & CA91CX42_LINT_LM[i]) {
69 /* We only enable interrupts if the callback is set */
70 bridge->lm_callback[i](bridge->lm_data[i]);
71 serviced |= CA91CX42_LINT_LM[i];
75 return serviced;
78 /* XXX This needs to be split into 4 queues */
79 static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
81 wake_up(&bridge->mbox_queue);
83 return CA91CX42_LINT_MBOX;
86 static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
88 wake_up(&bridge->iack_queue);
90 return CA91CX42_LINT_SW_IACK;
93 static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
95 int val;
96 struct ca91cx42_driver *bridge;
98 bridge = ca91cx42_bridge->driver_priv;
100 val = ioread32(bridge->base + DGCS);
102 if (!(val & 0x00000800)) {
103 dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
104 "Read Error DGCS=%08X\n", val);
107 return CA91CX42_LINT_VERR;
110 static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
112 int val;
113 struct ca91cx42_driver *bridge;
115 bridge = ca91cx42_bridge->driver_priv;
117 val = ioread32(bridge->base + DGCS);
119 if (!(val & 0x00000800))
120 dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
121 "Read Error DGCS=%08X\n", val);
123 return CA91CX42_LINT_LERR;
127 static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
128 int stat)
130 int vec, i, serviced = 0;
131 struct ca91cx42_driver *bridge;
133 bridge = ca91cx42_bridge->driver_priv;
136 for (i = 7; i > 0; i--) {
137 if (stat & (1 << i)) {
138 vec = ioread32(bridge->base +
139 CA91CX42_V_STATID[i]) & 0xff;
141 vme_irq_handler(ca91cx42_bridge, i, vec);
143 serviced |= (1 << i);
147 return serviced;
150 static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
152 u32 stat, enable, serviced = 0;
153 struct vme_bridge *ca91cx42_bridge;
154 struct ca91cx42_driver *bridge;
156 ca91cx42_bridge = ptr;
158 bridge = ca91cx42_bridge->driver_priv;
160 enable = ioread32(bridge->base + LINT_EN);
161 stat = ioread32(bridge->base + LINT_STAT);
163 /* Only look at unmasked interrupts */
164 stat &= enable;
166 if (unlikely(!stat))
167 return IRQ_NONE;
169 if (stat & CA91CX42_LINT_DMA)
170 serviced |= ca91cx42_DMA_irqhandler(bridge);
171 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
172 CA91CX42_LINT_LM3))
173 serviced |= ca91cx42_LM_irqhandler(bridge, stat);
174 if (stat & CA91CX42_LINT_MBOX)
175 serviced |= ca91cx42_MB_irqhandler(bridge, stat);
176 if (stat & CA91CX42_LINT_SW_IACK)
177 serviced |= ca91cx42_IACK_irqhandler(bridge);
178 if (stat & CA91CX42_LINT_VERR)
179 serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
180 if (stat & CA91CX42_LINT_LERR)
181 serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
182 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
183 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
184 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
185 CA91CX42_LINT_VIRQ7))
186 serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
188 /* Clear serviced interrupts */
189 iowrite32(serviced, bridge->base + LINT_STAT);
191 return IRQ_HANDLED;
194 static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
196 int result, tmp;
197 struct pci_dev *pdev;
198 struct ca91cx42_driver *bridge;
200 bridge = ca91cx42_bridge->driver_priv;
202 /* Need pdev */
203 pdev = to_pci_dev(ca91cx42_bridge->parent);
205 /* Disable interrupts from PCI to VME */
206 iowrite32(0, bridge->base + VINT_EN);
208 /* Disable PCI interrupts */
209 iowrite32(0, bridge->base + LINT_EN);
210 /* Clear Any Pending PCI Interrupts */
211 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
213 result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
214 driver_name, ca91cx42_bridge);
215 if (result) {
216 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
217 pdev->irq);
218 return result;
221 /* Ensure all interrupts are mapped to PCI Interrupt 0 */
222 iowrite32(0, bridge->base + LINT_MAP0);
223 iowrite32(0, bridge->base + LINT_MAP1);
224 iowrite32(0, bridge->base + LINT_MAP2);
226 /* Enable DMA, mailbox & LM Interrupts */
227 tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
228 CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
229 CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
231 iowrite32(tmp, bridge->base + LINT_EN);
233 return 0;
236 static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
237 struct pci_dev *pdev)
239 struct vme_bridge *ca91cx42_bridge;
241 /* Disable interrupts from PCI to VME */
242 iowrite32(0, bridge->base + VINT_EN);
244 /* Disable PCI interrupts */
245 iowrite32(0, bridge->base + LINT_EN);
246 /* Clear Any Pending PCI Interrupts */
247 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
249 ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
250 driver_priv);
251 free_irq(pdev->irq, ca91cx42_bridge);
254 static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
256 u32 tmp;
258 tmp = ioread32(bridge->base + LINT_STAT);
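/* A set VIRQ status bit for this level means the interrupt is still
 * pending on the bus, i.e. no IACK has been received yet.
 */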
260 if (tmp & (1 << level))
261 return 0;
262 else
263 return 1;
267 * Set up a VME interrupt
269 static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
270 int state, int sync)
273 struct pci_dev *pdev;
274 u32 tmp;
275 struct ca91cx42_driver *bridge;
277 bridge = ca91cx42_bridge->driver_priv;
279 /* Enable IRQ level */
280 tmp = ioread32(bridge->base + LINT_EN);
282 if (state == 0)
283 tmp &= ~CA91CX42_LINT_VIRQ[level];
284 else
285 tmp |= CA91CX42_LINT_VIRQ[level];
287 iowrite32(tmp, bridge->base + LINT_EN);
289 if ((state == 0) && (sync != 0)) {
290 pdev = to_pci_dev(ca91cx42_bridge->parent);
292 synchronize_irq(pdev->irq);
296 static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
297 int statid)
299 u32 tmp;
300 struct ca91cx42_driver *bridge;
302 bridge = ca91cx42_bridge->driver_priv;
304 /* Universe can only generate even vectors */
305 if (statid & 1)
306 return -EINVAL;
308 mutex_lock(&bridge->vme_int);
310 tmp = ioread32(bridge->base + VINT_EN);
312 /* Set Status/ID */
313 iowrite32(statid << 24, bridge->base + STATID);
315 /* Assert VMEbus IRQ */
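/* The software VIRQ enable bits for levels 1-7 occupy the top byte of
 * VINT_EN, hence the shift by (level + 24).
 */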
316 tmp = tmp | (1 << (level + 24));
317 iowrite32(tmp, bridge->base + VINT_EN);
319 /* Wait for IACK */
320 wait_event_interruptible(bridge->iack_queue,
321 ca91cx42_iack_received(bridge, level));
323 /* Return interrupt to low state */
324 tmp = ioread32(bridge->base + VINT_EN);
325 tmp = tmp & ~(1 << (level + 24));
326 iowrite32(tmp, bridge->base + VINT_EN);
328 mutex_unlock(&bridge->vme_int);
330 return 0;
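/*
 * Example usage (a minimal sketch, not taken from this file): VME
 * interrupts are normally driven through the VME core, which calls the
 * irq_set/irq_generate methods above.  Here "vdev" is assumed to be the
 * struct vme_dev handed to a consumer driver's probe, and vector 0x50 is
 * an arbitrary even status/ID (odd values are rejected above):
 *
 *	static void my_handler(int level, int statid, void *priv)
 *	{
 *		pr_info("VME IRQ level %d, vector %02x\n", level, statid);
 *	}
 *
 *	err = vme_irq_request(vdev, 3, 0x50, my_handler, NULL);
 *	err = vme_irq_generate(vdev, 3, 0x50);
 *	vme_irq_free(vdev, 3, 0x50);
 */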
333 static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
334 unsigned long long vme_base, unsigned long long size,
335 dma_addr_t pci_base, u32 aspace, u32 cycle)
337 unsigned int i, addr = 0, granularity;
338 unsigned int temp_ctl = 0;
339 unsigned int vme_bound, pci_offset;
340 struct vme_bridge *ca91cx42_bridge;
341 struct ca91cx42_driver *bridge;
343 ca91cx42_bridge = image->parent;
345 bridge = ca91cx42_bridge->driver_priv;
347 i = image->number;
349 switch (aspace) {
350 case VME_A16:
351 addr |= CA91CX42_VSI_CTL_VAS_A16;
352 break;
353 case VME_A24:
354 addr |= CA91CX42_VSI_CTL_VAS_A24;
355 break;
356 case VME_A32:
357 addr |= CA91CX42_VSI_CTL_VAS_A32;
358 break;
359 case VME_USER1:
360 addr |= CA91CX42_VSI_CTL_VAS_USER1;
361 break;
362 case VME_USER2:
363 addr |= CA91CX42_VSI_CTL_VAS_USER2;
364 break;
365 case VME_A64:
366 case VME_CRCSR:
367 case VME_USER3:
368 case VME_USER4:
369 default:
370 dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
371 return -EINVAL;
372 break;
376 * Bound address is a valid address for the window, adjust
377 * accordingly
379 vme_bound = vme_base + size;
380 pci_offset = pci_base - vme_base;
382 if ((i == 0) || (i == 4))
383 granularity = 0x1000;
384 else
385 granularity = 0x10000;
387 if (vme_base & (granularity - 1)) {
388 dev_err(ca91cx42_bridge->parent, "Invalid VME base "
389 "alignment\n");
390 return -EINVAL;
392 if (vme_bound & (granularity - 1)) {
393 dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
394 "alignment\n");
395 return -EINVAL;
397 if (pci_offset & (granularity - 1)) {
398 dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
399 "alignment\n");
400 return -EINVAL;
403 /* Disable while we are mucking around */
404 temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
405 temp_ctl &= ~CA91CX42_VSI_CTL_EN;
406 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
408 /* Setup mapping */
409 iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
410 iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
411 iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
413 /* Setup address space */
414 temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
415 temp_ctl |= addr;
417 /* Setup cycle types */
418 temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
419 if (cycle & VME_SUPER)
420 temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
421 if (cycle & VME_USER)
422 temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
423 if (cycle & VME_PROG)
424 temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
425 if (cycle & VME_DATA)
426 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
428 /* Write ctl reg without enable */
429 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
431 if (enabled)
432 temp_ctl |= CA91CX42_VSI_CTL_EN;
434 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
436 return 0;
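/*
 * Example usage (a minimal sketch, not taken from this file): slave
 * windows are configured via the VME core, which lands in
 * ca91cx42_slave_set() above.  "vdev" and all addresses are illustrative;
 * base, bound and offset must respect the 4K (images 0 and 4) or 64K
 * granularity checked above:
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_bus;
 *	void *buf;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_bus);
 *	err = vme_slave_set(res, 1, 0x400000, 0x10000, buf_bus,
 *			VME_A24, VME_SCT | VME_USER | VME_DATA);
 */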
439 static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
440 unsigned long long *vme_base, unsigned long long *size,
441 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
443 unsigned int i, granularity = 0, ctl = 0;
444 unsigned long long vme_bound, pci_offset;
445 struct ca91cx42_driver *bridge;
447 bridge = image->parent->driver_priv;
449 i = image->number;
451 if ((i == 0) || (i == 4))
452 granularity = 0x1000;
453 else
454 granularity = 0x10000;
456 /* Read Registers */
457 ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
459 *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
460 vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
461 pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
463 *pci_base = (dma_addr_t)*vme_base + pci_offset;
464 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
466 *enabled = 0;
467 *aspace = 0;
468 *cycle = 0;
470 if (ctl & CA91CX42_VSI_CTL_EN)
471 *enabled = 1;
473 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
474 *aspace = VME_A16;
475 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
476 *aspace = VME_A24;
477 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
478 *aspace = VME_A32;
479 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
480 *aspace = VME_USER1;
481 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
482 *aspace = VME_USER2;
484 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
485 *cycle |= VME_SUPER;
486 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
487 *cycle |= VME_USER;
488 if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
489 *cycle |= VME_PROG;
490 if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
491 *cycle |= VME_DATA;
493 return 0;
497 * Allocate and map PCI Resource
499 static int ca91cx42_alloc_resource(struct vme_master_resource *image,
500 unsigned long long size)
502 unsigned long long existing_size;
503 int retval = 0;
504 struct pci_dev *pdev;
505 struct vme_bridge *ca91cx42_bridge;
507 ca91cx42_bridge = image->parent;
509 /* Find pci_dev container of dev */
510 if (!ca91cx42_bridge->parent) {
511 pr_err("Dev entry NULL\n");
512 return -EINVAL;
514 pdev = to_pci_dev(ca91cx42_bridge->parent);
516 existing_size = (unsigned long long)(image->bus_resource.end -
517 image->bus_resource.start);
519 /* If the existing size is OK, return (resource ends are inclusive, so compare against size - 1) */
520 if (existing_size == (size - 1))
521 return 0;
523 if (existing_size != 0) {
524 iounmap(image->kern_base);
525 image->kern_base = NULL;
526 kfree(image->bus_resource.name);
527 release_resource(&image->bus_resource);
528 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
531 if (!image->bus_resource.name) {
532 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
533 if (!image->bus_resource.name) {
534 retval = -ENOMEM;
535 goto err_name;
539 sprintf((char *)image->bus_resource.name, "%s.%d",
540 ca91cx42_bridge->name, image->number);
542 image->bus_resource.start = 0;
543 image->bus_resource.end = (unsigned long)size;
544 image->bus_resource.flags = IORESOURCE_MEM;
546 retval = pci_bus_alloc_resource(pdev->bus,
547 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
548 0, NULL, NULL);
549 if (retval) {
550 dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
551 "resource for window %d size 0x%lx start 0x%lx\n",
552 image->number, (unsigned long)size,
553 (unsigned long)image->bus_resource.start);
554 goto err_resource;
557 image->kern_base = ioremap(
558 image->bus_resource.start, size);
559 if (!image->kern_base) {
560 dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
561 retval = -ENOMEM;
562 goto err_remap;
565 return 0;
567 err_remap:
568 release_resource(&image->bus_resource);
569 err_resource:
570 kfree(image->bus_resource.name);
571 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
572 err_name:
573 return retval;
577 * Free and unmap PCI Resource
579 static void ca91cx42_free_resource(struct vme_master_resource *image)
581 iounmap(image->kern_base);
582 image->kern_base = NULL;
583 release_resource(&image->bus_resource);
584 kfree(image->bus_resource.name);
585 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
589 static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
590 unsigned long long vme_base, unsigned long long size, u32 aspace,
591 u32 cycle, u32 dwidth)
593 int retval = 0;
594 unsigned int i, granularity = 0;
595 unsigned int temp_ctl = 0;
596 unsigned long long pci_bound, vme_offset, pci_base;
597 struct vme_bridge *ca91cx42_bridge;
598 struct ca91cx42_driver *bridge;
600 ca91cx42_bridge = image->parent;
602 bridge = ca91cx42_bridge->driver_priv;
604 i = image->number;
606 if ((i == 0) || (i == 4))
607 granularity = 0x1000;
608 else
609 granularity = 0x10000;
611 /* Verify input data */
612 if (vme_base & (granularity - 1)) {
613 dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
614 "alignment\n");
615 retval = -EINVAL;
616 goto err_window;
618 if (size & (granularity - 1)) {
619 dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
620 "alignment\n");
621 retval = -EINVAL;
622 goto err_window;
625 spin_lock(&image->lock);
628 * Let's allocate the resource here rather than further up the stack as
629 * it avoids pushing loads of bus-dependent stuff up the stack
631 retval = ca91cx42_alloc_resource(image, size);
632 if (retval) {
633 spin_unlock(&image->lock);
634 dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
635 "for resource name\n");
636 retval = -ENOMEM;
637 goto err_res;
640 pci_base = (unsigned long long)image->bus_resource.start;
643 * Bound address is a valid address for the window, adjust
644 * according to window granularity.
646 pci_bound = pci_base + size;
647 vme_offset = vme_base - pci_base;
649 /* Disable while we are mucking around */
650 temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
651 temp_ctl &= ~CA91CX42_LSI_CTL_EN;
652 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
654 /* Setup cycle types */
655 temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
656 if (cycle & VME_BLT)
657 temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
658 if (cycle & VME_MBLT)
659 temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
661 /* Setup data width */
662 temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
663 switch (dwidth) {
664 case VME_D8:
665 temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
666 break;
667 case VME_D16:
668 temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
669 break;
670 case VME_D32:
671 temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
672 break;
673 case VME_D64:
674 temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
675 break;
676 default:
677 spin_unlock(&image->lock);
678 dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
679 retval = -EINVAL;
680 goto err_dwidth;
681 break;
684 /* Setup address space */
685 temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
686 switch (aspace) {
687 case VME_A16:
688 temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
689 break;
690 case VME_A24:
691 temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
692 break;
693 case VME_A32:
694 temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
695 break;
696 case VME_CRCSR:
697 temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
698 break;
699 case VME_USER1:
700 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
701 break;
702 case VME_USER2:
703 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
704 break;
705 case VME_A64:
706 case VME_USER3:
707 case VME_USER4:
708 default:
709 spin_unlock(&image->lock);
710 dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
711 retval = -EINVAL;
712 goto err_aspace;
713 break;
716 temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
717 if (cycle & VME_SUPER)
718 temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
719 if (cycle & VME_PROG)
720 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
722 /* Setup mapping */
723 iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
724 iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
725 iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
727 /* Write ctl reg without enable */
728 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
730 if (enabled)
731 temp_ctl |= CA91CX42_LSI_CTL_EN;
733 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
735 spin_unlock(&image->lock);
736 return 0;
738 err_aspace:
739 err_dwidth:
740 ca91cx42_free_resource(image);
741 err_res:
742 err_window:
743 return retval;
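/*
 * Example usage (a minimal sketch, not taken from this file): a master
 * window set up through the VME core, which reaches ca91cx42_master_set()
 * above.  "vdev" and the addresses are illustrative; the same 4K/64K
 * granularity rules verified above apply:
 *
 *	struct vme_resource *res;
 *	u32 data;
 *
 *	res = vme_master_request(vdev, VME_A32,
 *			VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	err = vme_master_set(res, 1, 0x80000000, 0x10000, VME_A32,
 *			VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	rc = vme_master_read(res, &data, sizeof(data), 0);
 */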
746 static int __ca91cx42_master_get(struct vme_master_resource *image,
747 int *enabled, unsigned long long *vme_base, unsigned long long *size,
748 u32 *aspace, u32 *cycle, u32 *dwidth)
750 unsigned int i, ctl;
751 unsigned long long pci_base, pci_bound, vme_offset;
752 struct ca91cx42_driver *bridge;
754 bridge = image->parent->driver_priv;
756 i = image->number;
758 ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
760 pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
761 vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
762 pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
764 *vme_base = pci_base + vme_offset;
765 *size = (unsigned long long)(pci_bound - pci_base);
767 *enabled = 0;
768 *aspace = 0;
769 *cycle = 0;
770 *dwidth = 0;
772 if (ctl & CA91CX42_LSI_CTL_EN)
773 *enabled = 1;
775 /* Setup address space */
776 switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
777 case CA91CX42_LSI_CTL_VAS_A16:
778 *aspace = VME_A16;
779 break;
780 case CA91CX42_LSI_CTL_VAS_A24:
781 *aspace = VME_A24;
782 break;
783 case CA91CX42_LSI_CTL_VAS_A32:
784 *aspace = VME_A32;
785 break;
786 case CA91CX42_LSI_CTL_VAS_CRCSR:
787 *aspace = VME_CRCSR;
788 break;
789 case CA91CX42_LSI_CTL_VAS_USER1:
790 *aspace = VME_USER1;
791 break;
792 case CA91CX42_LSI_CTL_VAS_USER2:
793 *aspace = VME_USER2;
794 break;
797 /* XXX Not sure how to check for MBLT */
798 /* Setup cycle types */
799 if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
800 *cycle |= VME_BLT;
801 else
802 *cycle |= VME_SCT;
804 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
805 *cycle |= VME_SUPER;
806 else
807 *cycle |= VME_USER;
809 if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
810 *cycle |= VME_PROG;
811 else
812 *cycle |= VME_DATA;
814 /* Setup data width */
815 switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
816 case CA91CX42_LSI_CTL_VDW_D8:
817 *dwidth = VME_D8;
818 break;
819 case CA91CX42_LSI_CTL_VDW_D16:
820 *dwidth = VME_D16;
821 break;
822 case CA91CX42_LSI_CTL_VDW_D32:
823 *dwidth = VME_D32;
824 break;
825 case CA91CX42_LSI_CTL_VDW_D64:
826 *dwidth = VME_D64;
827 break;
830 return 0;
833 static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
834 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
835 u32 *cycle, u32 *dwidth)
837 int retval;
839 spin_lock(&image->lock);
841 retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
842 cycle, dwidth);
844 spin_unlock(&image->lock);
846 return retval;
849 static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
850 void *buf, size_t count, loff_t offset)
852 ssize_t retval;
853 void __iomem *addr = image->kern_base + offset;
854 unsigned int done = 0;
855 unsigned int count32;
857 if (count == 0)
858 return 0;
860 spin_lock(&image->lock);
862 /* The following code handles VME address alignment. We cannot use
863 * memcpy_xxx here because it may cut data transfers into 8-bit
864 * cycles when D16 or D32 cycles are required on the VME bus.
865 * On the other hand, the bridge itself assures that the maximum data
866 * cycle configured for the transfer is used and splits it
867 * automatically for non-aligned addresses, so we don't want the
868 * overhead of needlessly forcing small transfers for the entire cycle.
870 if ((uintptr_t)addr & 0x1) {
871 *(u8 *)buf = ioread8(addr);
872 done += 1;
873 if (done == count)
874 goto out;
876 if ((uintptr_t)(addr + done) & 0x2) {
877 if ((count - done) < 2) {
878 *(u8 *)(buf + done) = ioread8(addr + done);
879 done += 1;
880 goto out;
881 } else {
882 *(u16 *)(buf + done) = ioread16(addr + done);
883 done += 2;
887 count32 = (count - done) & ~0x3;
888 while (done < count32) {
889 *(u32 *)(buf + done) = ioread32(addr + done);
890 done += 4;
893 if ((count - done) & 0x2) {
894 *(u16 *)(buf + done) = ioread16(addr + done);
895 done += 2;
897 if ((count - done) & 0x1) {
898 *(u8 *)(buf + done) = ioread8(addr + done);
899 done += 1;
901 out:
902 retval = count;
903 spin_unlock(&image->lock);
905 return retval;
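/*
 * Worked example: a 7-byte read from an address ending in ...1 is split by
 * the code above into one 8-bit cycle (reaching 16-bit alignment), one
 * 16-bit cycle (reaching 32-bit alignment) and one 32-bit cycle for the
 * remaining aligned run: 1 + 2 + 4 = 7 bytes, using the widest cycles the
 * alignment allows.
 */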
908 static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
909 void *buf, size_t count, loff_t offset)
911 ssize_t retval;
912 void __iomem *addr = image->kern_base + offset;
913 unsigned int done = 0;
914 unsigned int count32;
916 if (count == 0)
917 return 0;
919 spin_lock(&image->lock);
921 /* Here we apply the same strategy we use in the master_read
922 * function in order to ensure the correct cycles.
924 if ((uintptr_t)addr & 0x1) {
925 iowrite8(*(u8 *)buf, addr);
926 done += 1;
927 if (done == count)
928 goto out;
930 if ((uintptr_t)(addr + done) & 0x2) {
931 if ((count - done) < 2) {
932 iowrite8(*(u8 *)(buf + done), addr + done);
933 done += 1;
934 goto out;
935 } else {
936 iowrite16(*(u16 *)(buf + done), addr + done);
937 done += 2;
941 count32 = (count - done) & ~0x3;
942 while (done < count32) {
943 iowrite32(*(u32 *)(buf + done), addr + done);
944 done += 4;
947 if ((count - done) & 0x2) {
948 iowrite16(*(u16 *)(buf + done), addr + done);
949 done += 2;
951 if ((count - done) & 0x1) {
952 iowrite8(*(u8 *)(buf + done), addr + done);
953 done += 1;
955 out:
956 retval = count;
958 spin_unlock(&image->lock);
960 return retval;
963 static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
964 unsigned int mask, unsigned int compare, unsigned int swap,
965 loff_t offset)
967 u32 result;
968 uintptr_t pci_addr;
969 struct ca91cx42_driver *bridge;
970 struct device *dev;
972 bridge = image->parent->driver_priv;
973 dev = image->parent->parent;
975 /* Find the PCI address that maps to the desired VME address */
977 /* Locking as we can only do one of these at a time */
978 mutex_lock(&bridge->vme_rmw);
980 /* Lock image */
981 spin_lock(&image->lock);
983 pci_addr = (uintptr_t)image->kern_base + offset;
985 /* Address must be 4-byte aligned */
986 if (pci_addr & 0x3) {
987 dev_err(dev, "RMW Address not 4-byte aligned\n");
988 result = -EINVAL;
989 goto out;
992 /* Ensure RMW Disabled whilst configuring */
993 iowrite32(0, bridge->base + SCYC_CTL);
995 /* Configure registers */
996 iowrite32(mask, bridge->base + SCYC_EN);
997 iowrite32(compare, bridge->base + SCYC_CMP);
998 iowrite32(swap, bridge->base + SCYC_SWP);
999 iowrite32(pci_addr, bridge->base + SCYC_ADDR);
1001 /* Enable RMW */
1002 iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);
1004 /* Kick process off with a read to the required address. */
1005 result = ioread32(image->kern_base + offset);
1007 /* Disable RMW */
1008 iowrite32(0, bridge->base + SCYC_CTL);
1010 out:
1011 spin_unlock(&image->lock);
1013 mutex_unlock(&bridge->vme_rmw);
1015 return result;
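/*
 * Example usage (a minimal sketch, not taken from this file): an RMW cycle
 * issued through the VME core against an enabled master window, e.g. to
 * atomically set bit 0 of the 32-bit word at offset 0x100 (values
 * illustrative; the offset must be 4-byte aligned, as checked above):
 *
 *	result = vme_master_rmw(res, 0x1, 0x0, 0x1, 0x100);
 *
 * Per the SCYC registers programmed above, "mask" selects the bits that
 * take part in the cycle, "compare" gives the value those bits are tested
 * against and "swap" supplies their replacement.
 */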
1018 static int ca91cx42_dma_list_add(struct vme_dma_list *list,
1019 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1021 struct ca91cx42_dma_entry *entry, *prev;
1022 struct vme_dma_pci *pci_attr;
1023 struct vme_dma_vme *vme_attr;
1024 dma_addr_t desc_ptr;
1025 int retval = 0;
1026 struct device *dev;
1028 dev = list->parent->parent->parent;
1030 /* XXX descriptor must be aligned on 64-bit boundaries */
1031 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1032 if (!entry) {
1033 retval = -ENOMEM;
1034 goto err_mem;
1037 /* Test descriptor alignment */
1038 if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
1039 dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
1040 "required: %p\n", &entry->descriptor);
1041 retval = -EINVAL;
1042 goto err_align;
1045 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1047 if (dest->type == VME_DMA_VME) {
1048 entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
1049 vme_attr = dest->private;
1050 pci_attr = src->private;
1051 } else {
1052 vme_attr = src->private;
1053 pci_attr = dest->private;
1056 /* Check we can fulfill the required attributes */
1057 if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
1058 VME_USER2)) != 0) {
1060 dev_err(dev, "Unsupported cycle type\n");
1061 retval = -EINVAL;
1062 goto err_aspace;
1065 if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
1066 VME_PROG | VME_DATA)) != 0) {
1068 dev_err(dev, "Unsupported cycle type\n");
1069 retval = -EINVAL;
1070 goto err_cycle;
1073 /* Check to see if we can fulfill source and destination */
1074 if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
1075 ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
1077 dev_err(dev, "Cannot perform transfer with this "
1078 "source-destination combination\n");
1079 retval = -EINVAL;
1080 goto err_direct;
1083 /* Setup cycle types */
1084 if (vme_attr->cycle & VME_BLT)
1085 entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1087 /* Setup data width */
1088 switch (vme_attr->dwidth) {
1089 case VME_D8:
1090 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1091 break;
1092 case VME_D16:
1093 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1094 break;
1095 case VME_D32:
1096 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1097 break;
1098 case VME_D64:
1099 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1100 break;
1101 default:
1102 dev_err(dev, "Invalid data width\n");
1103 return -EINVAL;
1106 /* Setup address space */
1107 switch (vme_attr->aspace) {
1108 case VME_A16:
1109 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1110 break;
1111 case VME_A24:
1112 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1113 break;
1114 case VME_A32:
1115 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1116 break;
1117 case VME_USER1:
1118 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1119 break;
1120 case VME_USER2:
1121 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1122 break;
1123 default:
1124 dev_err(dev, "Invalid address space\n");
1125 return -EINVAL;
1126 break;
1129 if (vme_attr->cycle & VME_SUPER)
1130 entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1131 if (vme_attr->cycle & VME_PROG)
1132 entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1134 entry->descriptor.dtbc = count;
1135 entry->descriptor.dla = pci_attr->address;
1136 entry->descriptor.dva = vme_attr->address;
1137 entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1139 /* Add to list */
1140 list_add_tail(&entry->list, &list->entries);
1142 /* Fill out previous descriptors "Next Address" */
1143 if (entry->list.prev != &list->entries) {
1144 prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1145 list);
1146 /* We need the bus address for the pointer */
1147 desc_ptr = virt_to_bus(&entry->descriptor);
1148 prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1151 return 0;
1153 err_cycle:
1154 err_aspace:
1155 err_direct:
1156 err_align:
1157 kfree(entry);
1158 err_mem:
1159 return retval;
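/*
 * Example usage (a minimal sketch, not taken from this file): a DMA list
 * built and run through the VME core, which calls the dma_list_* methods
 * in this file.  "vdev", "buf_bus" (a coherent PCI buffer) and the VME
 * address are illustrative:
 *
 *	struct vme_resource *dma_res;
 *	struct vme_dma_list *dlist;
 *	struct vme_dma_attr *src, *dst;
 *
 *	dma_res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);
 *	dlist = vme_new_dma_list(dma_res);
 *	src = vme_dma_vme_attribute(0x400000, VME_A24,
 *			VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	dst = vme_dma_pci_attribute(buf_bus);
 *	err = vme_dma_list_add(dlist, src, dst, 0x1000);
 *	err = vme_dma_list_exec(dlist);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dst);
 *	vme_dma_list_free(dlist);
 *	vme_dma_free(dma_res);
 */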
1162 static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1164 u32 tmp;
1165 struct ca91cx42_driver *bridge;
1167 bridge = ca91cx42_bridge->driver_priv;
1169 tmp = ioread32(bridge->base + DGCS);
1171 if (tmp & CA91CX42_DGCS_ACT)
1172 return 0;
1173 else
1174 return 1;
1177 static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
1179 struct vme_dma_resource *ctrlr;
1180 struct ca91cx42_dma_entry *entry;
1181 int retval;
1182 dma_addr_t bus_addr;
1183 u32 val;
1184 struct device *dev;
1185 struct ca91cx42_driver *bridge;
1187 ctrlr = list->parent;
1189 bridge = ctrlr->parent->driver_priv;
1190 dev = ctrlr->parent->parent;
1192 mutex_lock(&ctrlr->mtx);
1194 if (!(list_empty(&ctrlr->running))) {
1196 * XXX We have an active DMA transfer and currently haven't
1197 * sorted out the mechanism for "pending" DMA transfers.
1198 * Return busy.
1200 /* Need to add to pending here */
1201 mutex_unlock(&ctrlr->mtx);
1202 return -EBUSY;
1203 } else {
1204 list_add(&list->list, &ctrlr->running);
1207 /* Get first bus address and write into registers */
1208 entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
1209 list);
1211 bus_addr = virt_to_bus(&entry->descriptor);
1213 mutex_unlock(&ctrlr->mtx);
1215 iowrite32(0, bridge->base + DTBC);
1216 iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
1218 /* Start the operation */
1219 val = ioread32(bridge->base + DGCS);
1221 /* XXX Could set VMEbus On and Off Counters here */
1222 val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
1224 val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
1225 CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1226 CA91CX42_DGCS_PERR);
1228 iowrite32(val, bridge->base + DGCS);
1230 val |= CA91CX42_DGCS_GO;
1232 iowrite32(val, bridge->base + DGCS);
1234 retval = wait_event_interruptible(bridge->dma_queue,
1235 ca91cx42_dma_busy(ctrlr->parent));
1237 if (retval) {
1238 val = ioread32(bridge->base + DGCS);
1239 iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
1240 /* Wait for the operation to abort */
1241 wait_event(bridge->dma_queue,
1242 ca91cx42_dma_busy(ctrlr->parent));
1243 retval = -EINTR;
1244 goto exit;
1248 * Read status register, this register is valid until we kick off a
1249 * new transfer.
1251 val = ioread32(bridge->base + DGCS);
1253 if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1254 CA91CX42_DGCS_PERR)) {
1256 dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
1257 val = ioread32(bridge->base + DCTL);
1258 retval = -EIO;
1261 exit:
1262 /* Remove list from running list */
1263 mutex_lock(&ctrlr->mtx);
1264 list_del(&list->list);
1265 mutex_unlock(&ctrlr->mtx);
1267 return retval;
1271 static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1273 struct list_head *pos, *temp;
1274 struct ca91cx42_dma_entry *entry;
1276 /* detach and free each entry */
1277 list_for_each_safe(pos, temp, &list->entries) {
1278 list_del(pos);
1279 entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1280 kfree(entry);
1283 return 0;
1287 * All 4 location monitors reside at the same base - this is therefore a
1288 * system-wide configuration.
1290 * This does not enable the location monitor - that should be done when the
1291 * first callback is attached and disabled when the last callback is removed.
1293 static int ca91cx42_lm_set(struct vme_lm_resource *lm,
1294 unsigned long long lm_base, u32 aspace, u32 cycle)
1296 u32 temp_base, lm_ctl = 0;
1297 int i;
1298 struct ca91cx42_driver *bridge;
1299 struct device *dev;
1301 bridge = lm->parent->driver_priv;
1302 dev = lm->parent->parent;
1304 /* Check the alignment of the location monitor */
1305 temp_base = (u32)lm_base;
1306 if (temp_base & 0xffff) {
1307 dev_err(dev, "Location monitor must be aligned to 64KB "
1308 "boundary");
1309 return -EINVAL;
1312 mutex_lock(&lm->mtx);
1314 /* If we already have a callback attached, we can't move it! */
1315 for (i = 0; i < lm->monitors; i++) {
1316 if (bridge->lm_callback[i]) {
1317 mutex_unlock(&lm->mtx);
1318 dev_err(dev, "Location monitor callback attached, "
1319 "can't reset\n");
1320 return -EBUSY;
1324 switch (aspace) {
1325 case VME_A16:
1326 lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1327 break;
1328 case VME_A24:
1329 lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1330 break;
1331 case VME_A32:
1332 lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1333 break;
1334 default:
1335 mutex_unlock(&lm->mtx);
1336 dev_err(dev, "Invalid address space\n");
1337 return -EINVAL;
1338 break;
1341 if (cycle & VME_SUPER)
1342 lm_ctl |= CA91CX42_LM_CTL_SUPR;
1343 if (cycle & VME_USER)
1344 lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1345 if (cycle & VME_PROG)
1346 lm_ctl |= CA91CX42_LM_CTL_PGM;
1347 if (cycle & VME_DATA)
1348 lm_ctl |= CA91CX42_LM_CTL_DATA;
1350 iowrite32(lm_base, bridge->base + LM_BS);
1351 iowrite32(lm_ctl, bridge->base + LM_CTL);
1353 mutex_unlock(&lm->mtx);
1355 return 0;
1358 /* Get the configuration of the location monitor and return whether it is
1359 * enabled or disabled.
1361 static int ca91cx42_lm_get(struct vme_lm_resource *lm,
1362 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1364 u32 lm_ctl, enabled = 0;
1365 struct ca91cx42_driver *bridge;
1367 bridge = lm->parent->driver_priv;
1369 mutex_lock(&lm->mtx);
1371 *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1372 lm_ctl = ioread32(bridge->base + LM_CTL);
1374 if (lm_ctl & CA91CX42_LM_CTL_EN)
1375 enabled = 1;
1377 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1378 *aspace = VME_A16;
1379 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1380 *aspace = VME_A24;
1381 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1382 *aspace = VME_A32;
1384 *cycle = 0;
1385 if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1386 *cycle |= VME_SUPER;
1387 if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1388 *cycle |= VME_USER;
1389 if (lm_ctl & CA91CX42_LM_CTL_PGM)
1390 *cycle |= VME_PROG;
1391 if (lm_ctl & CA91CX42_LM_CTL_DATA)
1392 *cycle |= VME_DATA;
1394 mutex_unlock(&lm->mtx);
1396 return enabled;
1400 * Attach a callback to a specific location monitor.
1402 * The callback will be passed the data pointer supplied when it is attached.
1404 static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
1405 void (*callback)(void *), void *data)
1407 u32 lm_ctl, tmp;
1408 struct ca91cx42_driver *bridge;
1409 struct device *dev;
1411 bridge = lm->parent->driver_priv;
1412 dev = lm->parent->parent;
1414 mutex_lock(&lm->mtx);
1416 /* Ensure that the location monitor is configured - need PGM or DATA */
1417 lm_ctl = ioread32(bridge->base + LM_CTL);
1418 if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
1419 mutex_unlock(&lm->mtx);
1420 dev_err(dev, "Location monitor not properly configured\n");
1421 return -EINVAL;
1424 /* Check that a callback isn't already attached */
1425 if (bridge->lm_callback[monitor]) {
1426 mutex_unlock(&lm->mtx);
1427 dev_err(dev, "Existing callback attached\n");
1428 return -EBUSY;
1431 /* Attach callback */
1432 bridge->lm_callback[monitor] = callback;
1433 bridge->lm_data[monitor] = data;
1435 /* Enable Location Monitor interrupt */
1436 tmp = ioread32(bridge->base + LINT_EN);
1437 tmp |= CA91CX42_LINT_LM[monitor];
1438 iowrite32(tmp, bridge->base + LINT_EN);
1440 /* Ensure that the global Location Monitor Enable bit is set */
1441 if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
1442 lm_ctl |= CA91CX42_LM_CTL_EN;
1443 iowrite32(lm_ctl, bridge->base + LM_CTL);
1446 mutex_unlock(&lm->mtx);
1448 return 0;
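/*
 * Example usage (a minimal sketch, not taken from this file): a location
 * monitor attached through the VME core.  "vdev" and the base address are
 * illustrative; the base must be 64KB aligned and the cycle set must
 * include PGM or DATA, as enforced above:
 *
 *	static void my_lm_callback(void *data)
 *	{
 *		pr_info("location monitor triggered\n");
 *	}
 *
 *	lm_res = vme_lm_request(vdev);
 *	err = vme_lm_set(lm_res, 0x600000, VME_A24, VME_USER | VME_DATA);
 *	err = vme_lm_attach(lm_res, 0, my_lm_callback, NULL);
 */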
1452 * Detach a callback function from a specific location monitor.
1454 static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
1456 u32 tmp;
1457 struct ca91cx42_driver *bridge;
1459 bridge = lm->parent->driver_priv;
1461 mutex_lock(&lm->mtx);
1463 /* Disable Location Monitor and ensure previous interrupts are clear */
1464 tmp = ioread32(bridge->base + LINT_EN);
1465 tmp &= ~CA91CX42_LINT_LM[monitor];
1466 iowrite32(tmp, bridge->base + LINT_EN);
1468 iowrite32(CA91CX42_LINT_LM[monitor],
1469 bridge->base + LINT_STAT);
1471 /* Detach callback */
1472 bridge->lm_callback[monitor] = NULL;
1473 bridge->lm_data[monitor] = NULL;
1475 /* If all location monitors are disabled, disable the global Location Monitor */
1476 if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
1477 CA91CX42_LINT_LM3)) == 0) {
1478 tmp = ioread32(bridge->base + LM_CTL);
1479 tmp &= ~CA91CX42_LM_CTL_EN;
1480 iowrite32(tmp, bridge->base + LM_CTL);
1483 mutex_unlock(&lm->mtx);
1485 return 0;
1488 static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1490 u32 slot = 0;
1491 struct ca91cx42_driver *bridge;
1493 bridge = ca91cx42_bridge->driver_priv;
1495 if (!geoid) {
1496 slot = ioread32(bridge->base + VCSR_BS);
1497 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1498 } else
1499 slot = geoid;
1501 return (int)slot;
1505 static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
1506 dma_addr_t *dma)
1508 struct pci_dev *pdev;
1510 /* Find pci_dev container of dev */
1511 pdev = to_pci_dev(parent);
1513 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
1516 static void ca91cx42_free_consistent(struct device *parent, size_t size,
1517 void *vaddr, dma_addr_t dma)
1519 struct pci_dev *pdev;
1521 /* Find pci_dev container of dev */
1522 pdev = to_pci_dev(parent);
1524 dma_free_coherent(&pdev->dev, size, vaddr, dma);
1528 * Configure CR/CSR space
1530 * Access to the CR/CSR can be configured at power-up. The location of the
1531 * CR/CSR registers in the CR/CSR address space is determined by the board's
1532 * Auto-ID or Geographic address. This function ensures that the window is
1533 * enabled at an offset consistent with the board's geographic address.
1535 static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
1536 struct pci_dev *pdev)
1538 unsigned int crcsr_addr;
1539 int tmp, slot;
1540 struct ca91cx42_driver *bridge;
1542 bridge = ca91cx42_bridge->driver_priv;
1544 slot = ca91cx42_slot_get(ca91cx42_bridge);
1546 /* Write CSR Base Address if slot ID is supplied as a module param */
1547 if (geoid)
1548 iowrite32(geoid << 27, bridge->base + VCSR_BS);
1550 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
1551 if (slot == 0) {
1552 dev_err(&pdev->dev, "Slot number is unset, not configuring "
1553 "CR/CSR space\n");
1554 return -EINVAL;
1557 /* Allocate mem for CR/CSR image */
1558 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
1559 VME_CRCSR_BUF_SIZE,
1560 &bridge->crcsr_bus, GFP_KERNEL);
1561 if (!bridge->crcsr_kernel) {
1562 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
1563 "image\n");
1564 return -ENOMEM;
1567 crcsr_addr = slot * (512 * 1024);
1568 iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
1570 tmp = ioread32(bridge->base + VCSR_CTL);
1571 tmp |= CA91CX42_VCSR_CTL_EN;
1572 iowrite32(tmp, bridge->base + VCSR_CTL);
1574 return 0;
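/*
 * Worked example: each slot owns a 512KB page of CR/CSR space, so for
 * slot 3 the image sits at 3 * 0x80000 = 0x180000 and VCSR_TO is loaded
 * with (crcsr_bus - 0x180000), making VME accesses to that page land in
 * the coherent buffer allocated above.
 */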
1577 static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
1578 struct pci_dev *pdev)
1580 u32 tmp;
1581 struct ca91cx42_driver *bridge;
1583 bridge = ca91cx42_bridge->driver_priv;
1585 /* Turn off CR/CSR space */
1586 tmp = ioread32(bridge->base + VCSR_CTL);
1587 tmp &= ~CA91CX42_VCSR_CTL_EN;
1588 iowrite32(tmp, bridge->base + VCSR_CTL);
1590 /* Free image */
1591 iowrite32(0, bridge->base + VCSR_TO);
1593 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
1594 bridge->crcsr_kernel, bridge->crcsr_bus);
1597 static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1599 int retval, i;
1600 u32 data;
1601 struct list_head *pos = NULL, *n;
1602 struct vme_bridge *ca91cx42_bridge;
1603 struct ca91cx42_driver *ca91cx42_device;
1604 struct vme_master_resource *master_image;
1605 struct vme_slave_resource *slave_image;
1606 struct vme_dma_resource *dma_ctrlr;
1607 struct vme_lm_resource *lm;
1609 /* We want to support more than one of each bridge so we need to
1610 * dynamically allocate the bridge structure
1612 ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
1613 if (!ca91cx42_bridge) {
1614 retval = -ENOMEM;
1615 goto err_struct;
1617 vme_init_bridge(ca91cx42_bridge);
1619 ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
1620 if (!ca91cx42_device) {
1621 retval = -ENOMEM;
1622 goto err_driver;
1625 ca91cx42_bridge->driver_priv = ca91cx42_device;
1627 /* Enable the device */
1628 retval = pci_enable_device(pdev);
1629 if (retval) {
1630 dev_err(&pdev->dev, "Unable to enable device\n");
1631 goto err_enable;
1634 /* Map Registers */
1635 retval = pci_request_regions(pdev, driver_name);
1636 if (retval) {
1637 dev_err(&pdev->dev, "Unable to reserve resources\n");
1638 goto err_resource;
1641 /* map registers in BAR 0 */
1642 ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
1643 4096);
1644 if (!ca91cx42_device->base) {
1645 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1646 retval = -EIO;
1647 goto err_remap;
1650 /* Check to see if the mapping worked out */
1651 data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1652 if (data != PCI_VENDOR_ID_TUNDRA) {
1653 dev_err(&pdev->dev, "PCI_ID check failed\n");
1654 retval = -EIO;
1655 goto err_test;
1658 /* Initialize wait queues & mutual exclusion flags */
1659 init_waitqueue_head(&ca91cx42_device->dma_queue);
1660 init_waitqueue_head(&ca91cx42_device->iack_queue);
1661 mutex_init(&ca91cx42_device->vme_int);
1662 mutex_init(&ca91cx42_device->vme_rmw);
1664 ca91cx42_bridge->parent = &pdev->dev;
1665 strcpy(ca91cx42_bridge->name, driver_name);
1667 /* Setup IRQ */
1668 retval = ca91cx42_irq_init(ca91cx42_bridge);
1669 if (retval != 0) {
1670 dev_err(&pdev->dev, "Chip Initialization failed.\n");
1671 goto err_irq;
1674 /* Add master windows to list */
1675 for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1676 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
1677 if (!master_image) {
1678 retval = -ENOMEM;
1679 goto err_master;
1681 master_image->parent = ca91cx42_bridge;
1682 spin_lock_init(&master_image->lock);
1683 master_image->locked = 0;
1684 master_image->number = i;
1685 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1686 VME_CRCSR | VME_USER1 | VME_USER2;
1687 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1688 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1689 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1690 memset(&master_image->bus_resource, 0,
1691 sizeof(master_image->bus_resource));
1692 master_image->kern_base = NULL;
1693 list_add_tail(&master_image->list,
1694 &ca91cx42_bridge->master_resources);
1697 /* Add slave windows to list */
1698 for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1699 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
1700 if (!slave_image) {
1701 retval = -ENOMEM;
1702 goto err_slave;
1704 slave_image->parent = ca91cx42_bridge;
1705 mutex_init(&slave_image->mtx);
1706 slave_image->locked = 0;
1707 slave_image->number = i;
1708 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1709 VME_USER2;
1711 /* Only windows 0 and 4 support A16 */
1712 if (i == 0 || i == 4)
1713 slave_image->address_attr |= VME_A16;
1715 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1716 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1717 list_add_tail(&slave_image->list,
1718 &ca91cx42_bridge->slave_resources);
1721 /* Add dma engines to list */
1722 for (i = 0; i < CA91C142_MAX_DMA; i++) {
1723 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
1724 if (!dma_ctrlr) {
1725 retval = -ENOMEM;
1726 goto err_dma;
1728 dma_ctrlr->parent = ca91cx42_bridge;
1729 mutex_init(&dma_ctrlr->mtx);
1730 dma_ctrlr->locked = 0;
1731 dma_ctrlr->number = i;
1732 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1733 VME_DMA_MEM_TO_VME;
1734 INIT_LIST_HEAD(&dma_ctrlr->pending);
1735 INIT_LIST_HEAD(&dma_ctrlr->running);
1736 list_add_tail(&dma_ctrlr->list,
1737 &ca91cx42_bridge->dma_resources);
1740 /* Add location monitor to list */
1741 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
1742 if (!lm) {
1743 retval = -ENOMEM;
1744 goto err_lm;
1746 lm->parent = ca91cx42_bridge;
1747 mutex_init(&lm->mtx);
1748 lm->locked = 0;
1749 lm->number = 1;
1750 lm->monitors = 4;
1751 list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
1753 ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1754 ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1755 ca91cx42_bridge->master_get = ca91cx42_master_get;
1756 ca91cx42_bridge->master_set = ca91cx42_master_set;
1757 ca91cx42_bridge->master_read = ca91cx42_master_read;
1758 ca91cx42_bridge->master_write = ca91cx42_master_write;
1759 ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1760 ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1761 ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1762 ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1763 ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1764 ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1765 ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1766 ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1767 ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1768 ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1769 ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1770 ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
1771 ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
1773 data = ioread32(ca91cx42_device->base + MISC_CTL);
1774 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1775 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1776 dev_info(&pdev->dev, "Slot ID is %d\n",
1777 ca91cx42_slot_get(ca91cx42_bridge));
1779 if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
1780 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1782 /* Need to save the ca91cx42_bridge pointer locally in a linked list for
1783 * use in ca91cx42_remove()
1785 retval = vme_register_bridge(ca91cx42_bridge);
1786 if (retval != 0) {
1787 dev_err(&pdev->dev, "Chip Registration failed.\n");
1788 goto err_reg;
1791 pci_set_drvdata(pdev, ca91cx42_bridge);
1793 return 0;
1795 err_reg:
1796 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1797 err_lm:
1798 /* resources are stored in a linked list */
1799 list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
1800 lm = list_entry(pos, struct vme_lm_resource, list);
1801 list_del(pos);
1802 kfree(lm);
1804 err_dma:
1805 /* resources are stored in a linked list */
1806 list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
1807 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1808 list_del(pos);
1809 kfree(dma_ctrlr);
1811 err_slave:
1812 /* resources are stored in a linked list */
1813 list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
1814 slave_image = list_entry(pos, struct vme_slave_resource, list);
1815 list_del(pos);
1816 kfree(slave_image);
1818 err_master:
1819 /* resources are stored in a linked list */
1820 list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
1821 master_image = list_entry(pos, struct vme_master_resource,
1822 list);
1823 list_del(pos);
1824 kfree(master_image);
1827 ca91cx42_irq_exit(ca91cx42_device, pdev);
1828 err_irq:
1829 err_test:
1830 iounmap(ca91cx42_device->base);
1831 err_remap:
1832 pci_release_regions(pdev);
1833 err_resource:
1834 pci_disable_device(pdev);
1835 err_enable:
1836 kfree(ca91cx42_device);
1837 err_driver:
1838 kfree(ca91cx42_bridge);
1839 err_struct:
1840 return retval;
1844 static void ca91cx42_remove(struct pci_dev *pdev)
1846 struct list_head *pos = NULL, *n;
1847 struct vme_master_resource *master_image;
1848 struct vme_slave_resource *slave_image;
1849 struct vme_dma_resource *dma_ctrlr;
1850 struct vme_lm_resource *lm;
1851 struct ca91cx42_driver *bridge;
1852 struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1854 bridge = ca91cx42_bridge->driver_priv;
1857 /* Turn off Ints */
1858 iowrite32(0, bridge->base + LINT_EN);
1860 /* Turn off the windows */
1861 iowrite32(0x00800000, bridge->base + LSI0_CTL);
1862 iowrite32(0x00800000, bridge->base + LSI1_CTL);
1863 iowrite32(0x00800000, bridge->base + LSI2_CTL);
1864 iowrite32(0x00800000, bridge->base + LSI3_CTL);
1865 iowrite32(0x00800000, bridge->base + LSI4_CTL);
1866 iowrite32(0x00800000, bridge->base + LSI5_CTL);
1867 iowrite32(0x00800000, bridge->base + LSI6_CTL);
1868 iowrite32(0x00800000, bridge->base + LSI7_CTL);
1869 iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1870 iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1871 iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1872 iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1873 iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1874 iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1875 iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1876 iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1878 vme_unregister_bridge(ca91cx42_bridge);
1880 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1882 /* resources are stored in a linked list */
1883 list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
1884 lm = list_entry(pos, struct vme_lm_resource, list);
1885 list_del(pos);
1886 kfree(lm);
1889 /* resources are stored in a linked list */
1890 list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
1891 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1892 list_del(pos);
1893 kfree(dma_ctrlr);
1896 /* resources are stored in a linked list */
1897 list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
1898 slave_image = list_entry(pos, struct vme_slave_resource, list);
1899 list_del(pos);
1900 kfree(slave_image);
1903 /* resources are stored in a linked list */
1904 list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
1905 master_image = list_entry(pos, struct vme_master_resource,
1906 list);
1907 list_del(pos);
1908 kfree(master_image);
1911 ca91cx42_irq_exit(bridge, pdev);
1913 iounmap(bridge->base);
1915 pci_release_regions(pdev);
1917 pci_disable_device(pdev);
1919 kfree(ca91cx42_bridge);
1922 module_pci_driver(ca91cx42_driver);
1924 module_param(geoid, int, 0);
1925 MODULE_PARM_DESC(geoid, "Override geographical addressing");
1927 MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1928 MODULE_LICENSE("GPL");