/*
 *	linux/arch/alpha/kernel/core_marvel.c
 *
 * Code common to all Marvel based systems.
 */
#define __EXTERN_INLINE inline
#include <asm/core_marvel.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif
static struct io7 *io7_head = NULL;
static unsigned long __attribute__ ((unused))
read_ev7_csr(int pe, unsigned long offset)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
	unsigned long q;

	mb();
	q = ev7csr->csr;
	mb();

	return q;
}
static void __attribute__ ((unused))
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
{
	ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);

	mb();
	ev7csr->csr = q;
	mb();
}
static char * __init
mk_resource_name(int pe, int port, char *str)
{
	char tmp[80];
	char *name;

	sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
	name = alloc_bootmem(strlen(tmp) + 1);
	strcpy(name, tmp);

	return name;
}
struct io7 *
marvel_next_io7(struct io7 *prev)
{
	return (prev ? prev->next : io7_head);
}
struct io7 *
marvel_find_io7(int pe)
{
	struct io7 *io7;

	for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
		continue;

	return io7;
}
static struct io7 * __init
alloc_io7(unsigned int pe)
{
	struct io7 *io7;
	struct io7 *insp;
	int h;

	if (marvel_find_io7(pe)) {
		printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
		return NULL;
	}

	io7 = alloc_bootmem(sizeof(*io7));
	io7->pe = pe;
	spin_lock_init(&io7->irq_lock);

	for (h = 0; h < 4; h++) {
		io7->ports[h].io7 = io7;
		io7->ports[h].port = h;
		io7->ports[h].enabled = 0;	/* default to disabled */
	}

	/*
	 * Insert in pe sorted order.
	 */
	if (NULL == io7_head)			/* empty list */
		io7_head = io7;
	else if (io7_head->pe > io7->pe) {	/* insert at head */
		io7->next = io7_head;
		io7_head = io7;
	} else {				/* insert at position */
		for (insp = io7_head; insp; insp = insp->next) {
			if (insp->pe == io7->pe) {
				printk(KERN_ERR "Too many IO7s at PE %d\n",
				       io7->pe);
				return NULL;
			}

			if (NULL == insp->next ||
			    insp->next->pe > io7->pe) { /* insert here */
				io7->next = insp->next;
				insp->next = io7;
				break;
			}
		}

		if (NULL == insp) { /* couldn't insert ?!? */
			printk(KERN_WARNING "Failed to insert IO7 at PE %d "
			       " - adding at head of list\n", io7->pe);
			io7->next = io7_head;
			io7_head = io7;
		}
	}

	return io7;
}
static void __init
io7_clear_errors(struct io7 *io7)
{
	io7_port7_csrs *p7csrs;
	io7_ioport_csrs *csrs;
	int port;

	/*
	 * First the IO ports.
	 */
	for (port = 0; port < 4; port++) {
		csrs = IO7_CSRS_KERN(io7->pe, port);

		csrs->POx_ERR_SUM.csr = -1UL;
		csrs->POx_TLB_ERR.csr = -1UL;
		csrs->POx_SPL_COMPLT.csr = -1UL;
		csrs->POx_TRANS_SUM.csr = -1UL;
	}

	/*
	 * Then the common ones.
	 */
	p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	p7csrs->PO7_ERROR_SUM.csr = -1UL;
	p7csrs->PO7_UNCRR_SYM.csr = -1UL;
	p7csrs->PO7_CRRCT_SYM.csr = -1UL;
}
/*
 * IO7 PCI, PCI/X, AGP configuration.
 */
static void __init
io7_init_hose(struct io7 *io7, int port)
{
	static int hose_index = 0;

	struct pci_controller *hose = alloc_pci_controller();
	struct io7_port *io7_port = &io7->ports[port];
	io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
	int i;

	hose->index = hose_index++;	/* arbitrary */

	/*
	 * We don't have an isa or legacy hose, but glibc expects to be
	 * able to use the bus == 0 / dev == 0 form of the iobase syscall
	 * to determine information about the i/o system. Since XFree86
	 * relies on glibc's determination to tell whether or not to use
	 * sparse access, we need to point the pci_isa_hose at a real hose
	 * so at least that determination is correct.
	 */
	if (hose->index == 0)
		pci_isa_hose = hose;

	io7_port->csrs = csrs;
	io7_port->hose = hose;
	hose->sysdata = io7_port;

	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * Base addresses for userland consumption. Since these are going
	 * to be mapped, they are pure physical addresses.
	 */
	hose->sparse_mem_base = hose->sparse_io_base = 0;
	hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
	hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);

	/*
	 * Base addresses and resource ranges for kernel consumption.
	 */
	hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);

	hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
	hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
	hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
	hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
	hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n",
		       hose->index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n",
		       hose->index);

	/*
	 * Save the existing DMA window settings for later restoration.
	 */
	for (i = 0; i < 4; i++) {
		io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
		io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
		io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather (up-to) 1GB at 3GB
	 * Window 3 is disabled
	 */

	/*
	 * TBIA before modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);

	/*
	 * Set up window 0 for scatter-gather 8MB at 8MB.
	 */
	hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[0].csr =
		hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);

	/*
	 * Set up window 1 for direct-mapped 1GB at 2GB.
	 */
	csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
	csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
	csrs->POx_TBASE[1].csr = 0;

	/*
	 * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
	 */
	hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
					    hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 8;	/* cache line boundary */
	csrs->POx_WBASE[2].csr =
		hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
	csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
	csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);

	/*
	 * Disable window 3.
	 */
	csrs->POx_WBASE[3].csr = 0;

	/*
	 * Make sure that the AGP Monster Window is disabled.
	 */
	csrs->POx_CTRL.csr &= ~(1UL << 61);

	printk("FIXME: disabling master aborts\n");
	csrs->POx_MSK_HEI.csr &= ~(3UL << 14);

	/*
	 * TBIA after modifying windows.
	 */
	marvel_pci_tbi(hose, 0, -1);
}
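/*
 * Note on the window programming above: window 1 is a direct-mapped DMA
 * window, so a PCI bus address B with 2GB <= B < 3GB reaches physical
 * address B - 2GB with no page-table lookup, while windows 0 and 2
 * translate through the scatter-gather PTE arrays whose physical
 * addresses were loaded into POx_TBASE.
 */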
void __init
marvel_init_io7(struct io7 *io7)
{
	int i;

	printk("Initializing IO7 at PID %d\n", io7->pe);

	/*
	 * Get the Port 7 CSR pointer.
	 */
	io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);

	/*
	 * Init this IO7's hoses.
	 */
	for (i = 0; i < IO7_NUM_PORTS; i++) {
		io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
		if (csrs->POx_CACHE_CTL.csr == 8) {
			io7->ports[i].enabled = 1;
			io7_init_hose(io7, i);
		}
	}
}
static int __init
marvel_io7_present(gct6_node *node)
{
	int pe;

	if (node->type != GCT_TYPE_HOSE ||
	    node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
		return 0;

	pe = (node->id >> 8) & 0xff;
	printk("Found an IO7 at PID %d\n", pe);

	alloc_io7(pe);
	return 0;
}
void __init
marvel_find_console_vga_hose(void)
{
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose = NULL;
		int h = (pu64[30] >> 24) & 0xff;	/* TERM_OUT_LOC, hose # */
		struct io7 *io7;
		int pid, port;

		/* FIXME - encoding is going to have to change for Marvel
		 *         since hose will be able to overflow a byte...
		 *         need to fix this decode when the console
		 *         changes its encoding
		 */
		printk("console graphics is on hose %d (console)\n", h);

		/*
		 * The console's hose numbering is:
		 *
		 *	hose<n:2>: PID
		 *	hose<1:0>: PORT
		 *
		 * We need to find the hose at that pid and port
		 */
		pid = h >> 2;
		port = h & 3;
		if ((io7 = marvel_find_io7(pid)))
			hose = io7->ports[port].hose;

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
}
gct6_search_struct gct_wanted_node_list[] = {
	{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
	{ 0, 0, NULL }
};
/*
 * In case the GCT is not complete, let the user specify PIDs with IO7s
 * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
 * where IO7s are connected.
 */
static int __init
marvel_specify_io7(char *str)
{
	unsigned long pid;
	struct io7 *io7;
	char *pchar;

	do {
		pid = simple_strtoul(str, &pchar, 0);
		if (pchar != str) {
			printk("User-specified IO7 at PID %lu\n", pid);
			io7 = alloc_io7(pid);
			if (io7) marvel_init_io7(io7);
		}

		if (pchar == str) pchar++;
		str = pchar;
	} while (*str);

	return 1;
}
__setup("io7=", marvel_specify_io7);
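/*
 * Usage example (hypothetical values): booting with "io7=4,6" allocates
 * and initializes IO7 structures for PIDs 4 and 6 even if the GCT did
 * not report them.
 */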
void __init
marvel_init_arch(void)
{
	struct io7 *io7;

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Parse the config tree.  */
	gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);

	/* Init the io7s.  */
	for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
		marvel_init_io7(io7);

	/* Check for graphic console location (if any).  */
	marvel_find_console_vga_hose();
}
void
marvel_kill_arch(int mode)
{
}
/*
 * PCI Configuration Space access functions
 *
 * Configuration space addresses have the following format:
 *
 *	|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *	|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	 n:24	reserved for hose base
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	 10:8	function number
 *	  7:2	register number
 *
 * Notes:
 *	The IO7 determines whether to use a type 0 or type 1 config cycle
 *	based on the bus number. Therefore the bus number must be set
 *	to 0 for the root bus on any hose.
 *
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 */
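/*
 * Worked example (illustrative values): bus 1, device 3, function 0,
 * register 0x10 gives PCI_DEVFN(3, 0) == 0x18, so build_conf_addr()
 * below forms config_space_base | (1 << 16) | (0x18 << 8) | 0x10,
 * i.e. the hose's config base plus 0x11810.
 */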
static inline unsigned long
build_conf_addr(struct pci_controller *hose, u8 bus,
		unsigned int devfn, int where)
{
	return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
}
static unsigned long
mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
{
	struct pci_controller *hose = pbus->sysdata;
	struct io7_port *io7_port;
	unsigned long addr = 0;
	u8 bus = pbus->number;

	/* Check for enabled.  */
	io7_port = hose->sysdata;
	if (!io7_port->enabled)
		return addr;

	if (!pbus->parent) { /* No parent means peer PCI bus. */
		/* Don't support idsel > 20 on primary bus.  */
		if (devfn >= PCI_DEVFN(21, 0))
			return addr;
		bus = 0;
	}

	addr = build_conf_addr(hose, bus, devfn, where);

	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return addr;
}
static int
marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 *value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int
marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 value)
{
	unsigned long addr;

	if (0 == (addr = mk_conf_addr(bus, devfn, where)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
struct pci_ops marvel_pci_ops =
{
	.read =		marvel_read_config,
	.write =	marvel_write_config,
};
/*
 * Other PCI helper functions.
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}
struct marvel_rtc_access_info {
	unsigned long function;
	unsigned long index;
	unsigned long data;
};
static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}
static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch (addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = BCD_TO_BIN(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

#ifdef CONFIG_SMP
		if (smp_processor_id() != boot_cpuid)
			smp_call_function_on_cpu(__marvel_access_rtc,
						 &rtc_access, 1, 1,
						 cpumask_of_cpu(boot_cpuid));
		else
			__marvel_access_rtc(&rtc_access);
#else
		__marvel_access_rtc(&rtc_access);
#endif
		ret = BIN_TO_BCD(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the address.
	 */
	FIXUP_MEMADDR_VGA(addr);

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	/* Assume it was already a reasonable address */
	vaddr = baddr + hose->mem_space->start;
	return (void __iomem *) vaddr;
}
void
marvel_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}
int
marvel_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0xFF000000UL) == 0;
}
#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))
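/*
 * The legacy keyboard controller (0x60/0x64) and RTC (0x70/0x71) ports
 * have no real hardware behind them on Marvel, so the byte I/O routines
 * below special-case them: keyboard accesses are simply ignored and RTC
 * accesses are routed through __marvel_rtc_io() and PALcode.
 */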
void __iomem *marvel_ioportmap (unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)addr;
}
unsigned int
marvel_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return 0;
	else if (__marvel_is_port_rtc(addr))
		return __marvel_rtc_io(0, addr, 0);
	else if (marvel_is_ioaddr(addr))
		return __kernel_ldbu(*(vucp)addr);
	else
		/* this should catch other legacy addresses
		   that would normally fail on MARVEL,
		   because there really is nothing there...
		*/
		return ~0;
}
void
marvel_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (__marvel_is_port_kbd(addr))
		return;
	else if (__marvel_is_port_rtc(addr))
		__marvel_rtc_io(b, addr, 1);
	else if (marvel_is_ioaddr(addr))
		__kernel_stb(b, *(vucp)addr);
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
/*
 * FIXME - for now each cpu is a node by itself
 *	   -- no real support for striped mode
 */
int
marvel_pa_to_nid(unsigned long pa)
{
	int cpuid;

	if ((pa >> 43) & 1)	/* I/O */
		cpuid = (~(pa >> 35) & 0xff);
	else			/* mem */
		cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));

	return marvel_cpuid_to_nid(cpuid);
}
int
marvel_cpuid_to_nid(int cpuid)
{
	return cpuid;
}
unsigned long
marvel_node_mem_start(int nid)
{
	unsigned long pa;

	pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
	pa <<= 34;

	return pa;
}
unsigned long
marvel_node_mem_size(int nid)
{
	return 16UL * 1024 * 1024 * 1024; /* 16GB */
}
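/*
 * Worked example (illustrative): with the per-CPU node scheme above,
 * nid 1 starts at marvel_node_mem_start(1) = 1UL << 34 (16GB) and spans
 * marvel_node_mem_size() = 16GB, so node n's memory occupies
 * [start(n), start(n) + 16GB).
 */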
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>
struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;
	int pg_start;
	int pg_count;
};
static int
marvel_agp_setup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(*aper), GFP_KERNEL);
	if (aper == NULL) return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);
	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}
static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The agpgart_be code has not programmed the card yet,
	 * so we can still tweak mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch (IO7_PLL_RNGB(agp_pll)) {
	case 0x4:				/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:				/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:				/* ??????? */
		/*
		 * Don't know what this PLL setting is, take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}
static int
marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->memory);
}
static int
marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}
static unsigned long
marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __FUNCTION__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __FUNCTION__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}
struct alpha_agp_ops marvel_agp_ops =
{
	.setup		= marvel_agp_setup,
	.cleanup	= marvel_agp_cleanup,
	.configure	= marvel_agp_configure,
	.bind		= marvel_agp_bind_memory,
	.unbind		= marvel_agp_unbind_memory,
	.translate	= marvel_agp_translate
};
alpha_agp_info *
marvel_agp_info(void)
{
	struct pci_controller *hose;
	io7_ioport_csrs *csrs;
	alpha_agp_info *agp;
	struct io7 *io7;

	/*
	 * Find the first IO7 with an AGP card.
	 *
	 * FIXME -- there should be a better way (we want to be able to
	 * specify and what if the agp card is not video???)
	 */
	hose = NULL;
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
		struct pci_controller *h;
		vuip addr;

		if (!io7->ports[IO7_AGP_PORT].enabled)
			continue;

		h = io7->ports[IO7_AGP_PORT].hose;
		addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);

		if (*addr != 0xffffffffu) {
			hose = h;
			break;
		}
	}

	if (!hose || !hose->sg_pci)
		return NULL;

	printk("MARVEL - using hose %d as AGP\n", hose->index);

	/*
	 * Get the csrs from the hose.
	 */
	csrs = ((struct io7_port *)hose->sysdata)->csrs;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = NULL;
	agp->ops = &marvel_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 *
	 * NOTE: IO7 reports through AGP_STAT that it can support a read queue
	 *       depth of 17 (rq = 0x10). It actually only supports a depth of
	 *       16 (rq = 0xf).
	 */
	agp->capability.lw = csrs->AGP_STAT.csr;
	agp->capability.bits.rq = 0xf;

	/*
	 * Mode.
	 */
	agp->mode.lw = csrs->AGP_CMD.csr;