// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_marvel.c
 *
 * Code common to all Marvel based systems.
 */
#define __EXTERN_INLINE inline
#include <asm/core_marvel.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

static struct io7 *io7_head = NULL;

static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}

static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}

static char * __init
mk_resource_name(int pe, int port, char *str)
{
	char tmp[80];
	char *name;

	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
	name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
	if (!name)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      strlen(tmp) + 1);
	strcpy(name, tmp);

	return name;
}

struct io7 *
marvel_next_io7(struct io7 *prev)
{
	return (prev ? prev->next : io7_head);
}

struct io7 *
marvel_find_io7(int pe)
{
	struct io7 *io7;

	for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
		continue;

	return io7;
}

static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
	if (!io7)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*io7));
	io7->pe = pe;
	raw_spin_lock_init(&io7->irq_lock);

	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;	/* default to disabled */
	}

	/*
	 * Insert in pe sorted order.
	 */
	if (NULL == io7_head)			/* empty list */
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {	/* insert at head */
		io7->next = io7_head;
		io7_head = io7;
	} else {				/* insert at position */
		for (insp = io7_head; insp; insp = insp->next) {
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) { /* insert here */
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		if (NULL == insp) { /* couldn't insert ?!? */
			printk(KERN_WARNING "Failed to insert IO7 at PE %d "
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}

void
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;

	/*
	 * First the IO ports.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common ones.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}

/*
 * IO7 PCI, PCI/X, AGP configuration.
 */
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;	/* arbitrary */

	/*
	 * We don't have an isa or legacy hose, but glibc expects to be
	 * able to use the bus == 0 / dev == 0 form of the iobase syscall
	 * to determine information about the i/o system. Since XFree86
	 * relies on glibc's determination to tell whether or not to use
	 * sparse access, we need to point the pci_isa_hose at a real hose
	 * so at least that determination is correct.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption. Since these are going
	 * to be mapped, they are pure physical addresses.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather (up-to) 1GB at 3GB
	 * Window 3 is disabled
	 */
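
	/*
	 * Each window below is programmed through three CSRs: POx_WBASE
	 * carries the PCI base address of the window plus its enable (and
	 * scatter-gather) bits, POx_WMASK sets the window size as an
	 * address mask, and POx_TBASE points at the translation table for
	 * the scatter-gather windows (it is left 0 for the direct-mapped
	 * window).
	 */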

	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Set up window 0 for scatter-gather 8MB at 8MB.
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Set up window 1 for direct-mapped 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;

	/*
	 * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);
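
	/*
	 * Note: this sg_pci arena is also what marvel_ioremap() walks for
	 * addresses that fall inside the scatter-gather window, and it is
	 * the arena marvel_agp_setup() reserves the AGP aperture from
	 * (see further below).
	 */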

	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure that the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);

	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}

static void __init
marvel_init_io7(struct io7 *io7)
{
	int i;

	printk("Initializing IO7 at PID %d\n", io7->pe);

	/*
	 * Get the Port 7 CSR pointer.
	 */
	io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	/*
	 * Init this IO7's hoses.
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
		if (csrs->POx_CACHE_CTL.csr == 8) {
			io7->ports[i].enabled = 1;
			io7_init_hose(io7, i);
		}
	}
}

static int __init
marvel_io7_present(gct6_node *node)
{
	int pe;

	if (node->type != GCT_TYPE_HOSE ||
	    node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
		return 0;

	pe = (node->id >> 8) & 0xff;
	printk("Found an IO7 at PID %d\n", pe);

	alloc_io7(pe);
	return 0;
}

static void __init
marvel_find_console_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff;  /* TERM_OUT_LOC, hose # */
		struct io7 *io7;
		int pid, port;

		/* FIXME - encoding is going to have to change for Marvel
		 *         since hose will be able to overflow a byte...
		 *         need to fix this decode when the console
		 *         changes its encoding
		 */
		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port
		 */
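		/*
		 * For illustration: with the pid/port decode below, a
		 * reported hose value of 9 (binary 1001) means PID 2,
		 * PORT 1.
		 */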
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
#endif
}

gct6_search_struct gct_wanted_node_list[] __initdata = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};

/*
 * In case the GCT is not complete, let the user specify PIDs with IO7s
 * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
 * where IO7s are connected.
 */
static int __init
marvel_specify_io7(char *str)
{
	unsigned long pid;
	struct io7 *io7;
	char *pchar;

	do {
		pid = simple_strtoul(str, &pchar, 0);
		if (pchar != str) {
			printk("User-specified IO7 at PID %lu\n", pid);
			io7 = alloc_io7(pid);
			if (io7) marvel_init_io7(io7);
		}

		if (pchar == str) pchar++;
		str = pchar;
	} while (*str);

	return 1;
}
__setup("io7=", marvel_specify_io7);
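
/*
 * Example (hypothetical command line, for illustration only): booting with
 * "io7=4,5" would allocate and initialize IO7s at PIDs 4 and 5 even if the
 * GCT did not report them.
 */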

void __init
marvel_init_arch(void)
{
	struct io7 *io7;

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Parse the config tree.  */
	gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);

	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
		marvel_init_io7(io7);

	/* Check for graphic console location (if any).  */
	marvel_find_console_vga_hose();
}

void
marvel_kill_arch(int mode)
{
}

/*
 * PCI Configuration Space access functions
 *
 * Configuration space addresses have the following format:
 *
 *	|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *	|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	n:24	reserved for hose base
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	IO7 determines whether to use a type 0 or type 1 config cycle
 *	based on the bus number. Therefore the bus number must be set
 *	to 0 for the root bus on any hose.
 *
 *	The function number selects which function of a multi-function
 *	device (e.g., SCSI and Ethernet).
 */
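
/*
 * Worked example (illustrative values only): a config read of register
 * 0x10 on bus 0, device 5, function 0 uses devfn = PCI_DEVFN(5, 0) = 0x28,
 * so build_conf_addr() below returns
 * hose->config_space_base | (0 << 16) | (0x28 << 8) | 0x10, i.e. the
 * hose's config space base plus 0x2810.
 */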

static inline unsigned long
build_conf_addr(struct pci_controller *hose, u8 bus,
		unsigned int devfn, int where)
{
	return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
}

static unsigned long
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
{
	struct pci_controller *hose = pbus->sysdata;
	struct io7_port *io7_port;
	unsigned long addr = 0;
	u8 bus = pbus->number;

	if (!hose)
		return addr;

	/* Check for enabled.  */
	io7_port = hose->sysdata;
	if (!io7_port->enabled)
		return addr;

	if (!pbus->parent) { /* No parent means peer PCI bus. */
		/* Don't support idsel > 20 on primary bus.  */
		if (devfn >= PCI_DEVFN(21, 0))
			return addr;
		bus = 0;
	}

	addr = build_conf_addr(hose, bus, devfn, where);

	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return addr;
}

static int
marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;

	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;

	case 4:
		*value = *(vuip)addr;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;

	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;

	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;

	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops marvel_pci_ops =
{
	.read =		marvel_read_config,
	.write =	marvel_write_config,
};

/*
 * Other PCI helper functions.
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}

struct marvel_rtc_access_info {
	unsigned long function;
	unsigned long index;
	unsigned long data;
};

static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}

static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch (addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = bcd2bin(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

		__marvel_access_rtc(&rtc_access);

		ret = bin2bcd(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}
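
/*
 * For illustration (standard CMOS RTC convention, not Marvel-specific):
 * a legacy read of the seconds register writes index 0 to port 0x70 and
 * then reads port 0x71. With the code above, that read becomes a cserve
 * call with function 0x48 + !write = 0x49 (the GET side of GET/PUT_TOY)
 * for index 0, and the result is converted back to BCD before it is
 * returned to the caller.
 */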

void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the address.
	 */
	FIXUP_MEMADDR_VGA(addr);

	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */
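
			/*
			 * Note on the arena pte format as used in this
			 * file: bit 0 is the valid bit and the page frame
			 * number is stored shifted left by one, which is
			 * why validity is tested with (pfn & 1) and the
			 * true pfn is recovered by shifting right.
			 */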
			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	/* Assume it was already a reasonable address */
	vaddr = baddr + hose->mem_space->start;
	return (void __iomem *) vaddr;
}

void
marvel_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}

int
marvel_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0xFF000000UL) == 0;
}

#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))

void __iomem *marvel_ioportmap (unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)addr;
}

unsigned int
marvel_ioread8(const void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (__marvel_is_port_kbd(addr))
		return 0;
	else if (__marvel_is_port_rtc(addr))
		return __marvel_rtc_io(0, addr, 0);
	else if (marvel_is_ioaddr(addr))
		return __kernel_ldbu(*(vucp)addr);
	else
		/* this should catch other legacy addresses
		   that would normally fail on MARVEL,
		   because there really is nothing there...
		*/
		return ~0;
}

void
marvel_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (__marvel_is_port_kbd(addr))
		return;
	else if (__marvel_is_port_rtc(addr))
		__marvel_rtc_io(b, addr, 1);
	else if (marvel_is_ioaddr(addr))
		__kernel_stb(b, *(vucp)addr);
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif

/*
 * FIXME - for now each cpu is a node by itself
 *	   -- no real support for striped mode
 */
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;

	if ((pa >> 43) & 1)	/* I/O */
		cpuid = (~(pa >> 35) & 0xff);
	else			/* mem */
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}
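
/*
 * Worked example (illustrative, based on the decode above and the 16GB
 * per-node sizing below): a memory PA of 0x400000000 (the start of the
 * second 16GB chunk) has ((pa >> 34) & 0x3) == 1, so it decodes to cpuid 1
 * and therefore nid 1; marvel_node_mem_start(1) maps back to the same
 * address.
 */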

int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}

unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long pa;

	pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
	pa <<= 34;

	return pa;
}

unsigned long
marvel_node_mem_size(int nid)
{
	return 16UL * 1024 * 1024 * 1024; /* 16GB */
}

#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};

static int
marvel_agp_setup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(*aper), GFP_KERNEL);
	if (aper == NULL) return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);

	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}

static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}

static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The agpgart_be code has not programmed the card yet,
	 * so we can still tweak mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch (IO7_PLL_RNGB(agp_pll)) {
	case 0x4:				/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:				/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:				/* ??????? */
		/*
		 * Don't know what this PLL setting is, take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}

static int
marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}

static int
marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}

static unsigned long
marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops marvel_agp_ops =
{
	.setup		= marvel_agp_setup,
	.cleanup	= marvel_agp_cleanup,
	.configure	= marvel_agp_configure,
	.bind		= marvel_agp_bind_memory,
	.unbind		= marvel_agp_unbind_memory,
	.translate	= marvel_agp_translate
};

alpha_agp_info *
marvel_agp_info(void)
{
	struct pci_controller *hose;
	io7_ioport_csrs *csrs;
	alpha_agp_info *agp;
	struct io7 *io7;

	/*
	 * Find the first IO7 with an AGP card.
	 *
	 * FIXME -- there should be a better way (we want to be able to
	 * specify and what if the agp card is not video???)
	 */
	hose = NULL;
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
		struct pci_controller *h;
		vuip addr;

		if (!io7->ports[IO7_AGP_PORT].enabled)
			continue;

		h = io7->ports[IO7_AGP_PORT].hose;
		addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);

		if (*addr != 0xffffffffu) {
			hose = h;
			break;
		}
	}

	if (!hose || !hose->sg_pci)
		return NULL;

	printk("MARVEL - using hose %d as AGP\n", hose->index);

	/*
	 * Get the csrs from the hose.
	 */
	csrs = ((struct io7_port *)hose->sysdata)->csrs;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	agp->hose = hose;
	agp->private = NULL;
	agp->ops = &marvel_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * NOTE: IO7 reports through AGP_STAT that it can support a read queue
	 *       depth of 17 (rq = 0x10). It actually only supports a depth of
	 *       16 (rq = 0xf).
	 */
	agp->capability.lw = csrs->AGP_STAT.csr;
	agp->capability.bits.rq = 0xf;

	agp->mode.lw = csrs->AGP_CMD.csr;

	return agp;
}