// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/kernel/core_marvel.c
 *
 * Code common to all Marvel based systems.
 */

#define __EXTERN_INLINE inline
#include <asm/core_marvel.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif

static struct io7 *io7_head = NULL;
static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}

static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}
static char * __init
mk_resource_name(int pe, int port, char *str)
{
	char tmp[80];
	char *name;

	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
	name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
	if (!name)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      strlen(tmp) + 1);
	strcpy(name, tmp);

	return name;
}
struct io7 *
marvel_next_io7(struct io7 *prev)
{
	return (prev ? prev->next : io7_head);
}
struct io7 *
marvel_find_io7(int pe)
{
	struct io7 *io7;

	for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
		continue;

	return io7;
}
static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
	if (!io7)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*io7));
	io7->pe = pe;
	raw_spin_lock_init(&io7->irq_lock);

	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;	/* default to disabled */
	}

	/*
	 * Insert in pe sorted order.
	 */
	if (NULL == io7_head)			/* empty list */
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {	/* insert at head */
		io7->next = io7_head;
		io7_head = io7;
	} else {				/* insert at position */
		for (insp = io7_head; insp; insp = insp->next) {
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) { /* insert here */
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		if (NULL == insp) { /* couldn't insert ?!? */
			printk(KERN_WARNING "Failed to insert IO7 at PE %d "
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}
static void __init
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;

	/*
	 * First the IO ports.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common ones.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}
/*
 * IO7 PCI, PCI/X, AGP configuration.
 */
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;	/* arbitrary */

	/*
	 * We don't have an isa or legacy hose, but glibc expects to be
	 * able to use the bus == 0 / dev == 0 form of the iobase syscall
	 * to determine information about the i/o system. Since XFree86
	 * relies on glibc's determination to tell whether or not to use
	 * sparse access, we need to point the pci_isa_hose at a real hose
	 * so at least that determination is correct.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption. Since these are going
	 * to be mapped, they are pure physical addresses.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}
	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather (up-to) 1GB at 3GB
	 * Window 3 is disabled
	 */
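	/*
	 * Worked example (editorial, derived from the register writes
	 * below): window 1 is programmed with WBASE = __direct_map_base
	 * (0x80000000), WMASK covering 1GB and TBASE = 0, so a device DMA
	 * to bus address 0x80001000 lands at physical address 0x1000.
	 * The scatter-gather windows (0 and 2) instead translate through
	 * the PTE arrays pointed to by their TBASE registers.
	 */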
	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Set up window 0 for scatter-gather 8MB at 8MB.
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Set up window 1 for direct-mapped 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;

	/*
	 * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);

	/*
	 * Window 3 is disabled.
	 */
	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure that the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);

	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}
void __init
marvel_init_io7(struct io7 *io7)
{
	int i;

	printk("Initializing IO7 at PID %d\n", io7->pe);

	/*
	 * Get the Port 7 CSR pointer.
	 */
	io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	/*
	 * Init this IO7's hoses.
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
		if (csrs->POx_CACHE_CTL.csr == 8) {
			io7->ports[i].enabled = 1;
			io7_init_hose(io7, i);
		}
	}
}
static int __init
marvel_io7_present(gct6_node *node)
{
	int pe;

	if (node->type != GCT_TYPE_HOSE ||
	    node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
		return 0;

	pe = (node->id >> 8) & 0xff;
	printk("Found an IO7 at PID %d\n", pe);

	alloc_io7(pe);
	return 0;
}
static void __init
marvel_find_console_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */
		struct io7 *io7;
		int pid, port;

		/* FIXME - encoding is going to have to change for Marvel
		 *         since hose will be able to overflow a byte...
		 *         need to fix this decode when the console
		 *         changes its encoding.
		 */
		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port.
		 */
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
#endif
}
gct6_search_struct gct_wanted_node_list[] __initdata = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};
/*
 * In case the GCT is not complete, let the user specify PIDs with IO7s
 * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
 * where IO7s are connected.
 */
static int __init
marvel_specify_io7(char *str)
{
	unsigned long pid;
	struct io7 *io7;
	char *pchar;

	do {
		pid = simple_strtoul(str, &pchar, 0);
		if (pchar != str) {
			printk("User-specified IO7 at PID %lu\n", pid);
			io7 = alloc_io7(pid);
			if (io7) marvel_init_io7(io7);
		}

		if (pchar == str) pchar++;
		str = pchar;
	} while (*str);

	return 1;
}
__setup("io7=", marvel_specify_io7);
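/*
 * Example (editorial, hypothetical values): booting with "io7=4,5" would
 * allocate and initialize IO7s at PIDs 4 and 5 even if the GCT did not
 * report them.
 */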
void __init
marvel_init_arch(void)
{
	struct io7 *io7;

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Parse the config tree.  */
	gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);

	/* Init the io7s.  */
	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
		marvel_init_io7(io7);

	/* Check for graphic console location (if any).  */
	marvel_find_console_vga_hose();
}
void
marvel_kill_arch(int mode)
{
}
/*
 * PCI Configuration Space access functions
 *
 * Configuration space addresses have the following format:
 *
 *	|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *	|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	n:24	reserved for hose base
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	IO7 determines whether to use a type 0 or type 1 config cycle
 *	based on the bus number. Therefore the bus number must be set
 *	to 0 for the root bus on any hose.
 *
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 */
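/*
 * Worked example (editorial, illustrative values only): bus 1, device 4,
 * function 0, register 0x10 encodes as
 *
 *	(1 << 16) | (PCI_DEVFN(4, 0) << 8) | 0x10 = 0x00012010
 *
 * which build_conf_addr() below ORs into the hose's config_space_base.
 */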
static inline unsigned long
build_conf_addr(struct pci_controller *hose, u8 bus,
		unsigned int devfn, int where)
{
	return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
}
static unsigned long
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
{
	struct pci_controller *hose = pbus->sysdata;
	struct io7_port *io7_port;
	unsigned long addr = 0;
	u8 bus = pbus->number;

	if (!hose)
		return addr;

	/* Check for enabled.  */
	io7_port = hose->sysdata;
	if (!io7_port->enabled)
		return addr;

	if (!pbus->parent) { /* No parent means peer PCI bus. */
		/* Don't support idsel > 20 on primary bus.  */
		if (devfn >= PCI_DEVFN(21, 0))
			return addr;
		bus = 0;
	}

	addr = build_conf_addr(hose, bus, devfn, where);

	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return addr;
}
static int
marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
struct pci_ops marvel_pci_ops =
{
	.read =		marvel_read_config,
	.write =	marvel_write_config,
};
/*
 * Other PCI helper functions.
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}
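/*
 * Editorial note: the trailing read of POx_SG_TBIA above is presumably
 * there to force the posted write out to the IO7, so the scatter-gather
 * TLB invalidate has completed before the caller reuses any PTEs.
 */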
struct marvel_rtc_access_info {
	unsigned long function;
	unsigned long index;
	unsigned long data;
};
static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}
static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch (addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = bcd2bin(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

		__marvel_access_rtc(&rtc_access);

		ret = bin2bcd(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}
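/*
 * Editorial note on the above: legacy RTC port accesses are emulated.
 * A write to port 0x70 only latches the register index; the actual access
 * happens on port 0x71, where the index and data are handed to the PALcode
 * cserve GET_TOY/PUT_TOY service via __marvel_access_rtc().
 */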
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the address.
	 */
	FIXUP_MEMADDR_VGA(addr);

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {
		/*
		 * Adjust the limits (mappings must be page aligned).
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		size = PAGE_ALIGN(last) - baddr;

		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		     baddr <= last;
		     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				return NULL;
			}
		}

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	/* Assume it was already a reasonable address */
	vaddr = baddr + hose->mem_space->start;
	return (void __iomem *) vaddr;
}
void
marvel_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}
int
marvel_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0xFF000000UL) == 0;
}
#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))
void __iomem *marvel_ioportmap (unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)addr;
}
unsigned int
marvel_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return 0;
	else if (__marvel_is_port_rtc(addr))
		return __marvel_rtc_io(0, addr, 0);
	else if (marvel_is_ioaddr(addr))
		return __kernel_ldbu(*(vucp)addr);
	else
		/* this should catch other legacy addresses
		   that would normally fail on MARVEL,
		   because there really is nothing there...
		*/
		return ~0;
}
void
marvel_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return;
	else if (__marvel_is_port_rtc(addr))
		__marvel_rtc_io(b, addr, 1);
	else if (marvel_is_ioaddr(addr))
		__kernel_stb(b, *(vucp)addr);
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
/*
 * FIXME - for now each cpu is a node by itself
 *	   -- no real support for striped mode
 */
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;

	if ((pa >> 43) & 1)	/* I/O */
		cpuid = (~(pa >> 35) & 0xff);
	else			/* mem */
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}
int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}
unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long pa;

	pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
	pa <<= 34;

	return pa;
}
unsigned long
marvel_node_mem_size(int nid)
{
	return 16UL * 1024 * 1024 * 1024; /* 16GB */
}
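/*
 * Editorial example (inferred from the helpers above): node memory is laid
 * out in 16GB chunks, so marvel_node_mem_start(0) == 0 and
 * marvel_node_mem_start(1) == 1UL << 34 (16GB).
 */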
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>
struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};
static int
marvel_agp_setup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENXIO;

	aper = kmalloc(sizeof(*aper), GFP_KERNEL);
	if (aper == NULL) return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);

	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}
static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The agpgart_be code has not programmed the card yet,
	 * so we can still tweak mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch (IO7_PLL_RNGB(agp_pll)) {
	case 0x4:				/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:				/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:				/* ??????? */
		/*
		 * Don't know what this PLL setting is, take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}
static int
marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}
static int
marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}
static unsigned long
marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}
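/*
 * Editorial note: in these arena PTEs bit 0 is the valid bit and the page
 * frame number sits above it, which is why marvel_ioremap() does
 * "pfn >>= 1" and the translation above returns (pte >> 1) << PAGE_SHIFT.
 */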
struct alpha_agp_ops marvel_agp_ops =
{
	.setup		= marvel_agp_setup,
	.cleanup	= marvel_agp_cleanup,
	.configure	= marvel_agp_configure,
	.bind		= marvel_agp_bind_memory,
	.unbind		= marvel_agp_unbind_memory,
	.translate	= marvel_agp_translate
};
alpha_agp_info *
marvel_agp_info(void)
{
	struct pci_controller *hose;
	io7_ioport_csrs *csrs;
	alpha_agp_info *agp;
	struct io7 *io7;

	/*
	 * Find the first IO7 with an AGP card.
	 *
	 * FIXME -- there should be a better way (we want to be able to
	 * specify and what if the agp card is not video???)
	 */
	hose = NULL;
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
		struct pci_controller *h;
		vuip addr;

		if (!io7->ports[IO7_AGP_PORT].enabled)
			continue;

		h = io7->ports[IO7_AGP_PORT].hose;
		addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);

		if (*addr != 0xffffffffu) {
			hose = h;
			break;
		}
	}

	if (!hose || !hose->sg_pci)
		return NULL;

	printk("MARVEL - using hose %d as AGP\n", hose->index);

	/*
	 * Get the csrs from the hose.
	 */
	csrs = ((struct io7_port *)hose->sysdata)->csrs;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	agp->hose = hose;
	agp->private = NULL;
	agp->ops = &marvel_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 *
	 * NOTE: IO7 reports through AGP_STAT that it can support a read queue
	 *       depth of 17 (rq = 0x10). It actually only supports a depth of
	 *       16 (rq = 0xf).
	 */
	agp->capability.lw = csrs->AGP_STAT.csr;
	agp->capability.bits.rq = 0xf;

	/*
	 * Mode.
	 */
	agp->mode.lw = csrs->AGP_CMD.csr;

	return agp;
}