// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_titan.c
 *
 * Code common to all TITAN core logic chips.
 */

/* Some headers (asm/io.h, asm/smp.h, asm/vga.h, "proto.h", "pci_impl.h")
   were missing from the damaged copy and have been restored here; they are
   required for the symbols used below. */
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_titan.h>
#undef __EXTERN_INLINE

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>

#include "proto.h"
#include "pci_impl.h"
/* Save Titan configuration data as the console had it set up.  */

struct
{
	unsigned long wsba[4];
	unsigned long wsm[4];
	unsigned long tba[4];
} saved_config[4] __attribute__((common));

/*
 * Is PChip 1 present? No need to query it more than once.
 */
static int titan_pchip1_present;
/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif
/*
 * Routines to access TIG registers.
 */
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
	return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}

static inline u8
titan_read_tig(int offset, u8 value)
{
	volatile unsigned long *tig_addr = mk_tig_addr(offset);
	return (u8)(*tig_addr & 0xff);
}

static inline void
titan_write_tig(int offset, u8 value)
{
	volatile unsigned long *tig_addr = mk_tig_addr(offset);
	*tig_addr = (unsigned long)value;
}
/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address
 * accordingly.  It is therefore not safe to have concurrent
 * invocations to configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
		 "pci_addr=0x%p, type1=0x%p)\n",
		 bus, device_fn, where, pci_addr, type1));

	if (!pbus->parent) /* No parent means peer PCI bus. */
		bus = 0;
	*type1 = (bus != 0);

	addr = (bus << 16) | (device_fn << 8) | where;
	addr |= hose->config_space_base;

	*pci_addr = addr;
	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}
static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
struct pci_ops titan_pci_ops =
{
	.read =		titan_read_config,
	.write =	titan_write_config,
};
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	titan_pachip *pachip =
	  (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
	titan_pachip_port *port;
	volatile unsigned long *csr;
	unsigned long value;

	/* Get the right hose.  */
	port = &pachip->g_port;
	if (hose->index & 2)
		port = &pachip->a_port;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.
	   Note that gtlbi* and atlbi* are in the same place in the g_port
	   and a_port, respectively, so the g_port offset can be used
	   even if hose is an a_port */
	csr = &port->port_specific.g.gtlbia.csr;
	if (((start ^ end) & 0xffff0000) == 0)
		csr = &port->port_specific.g.gtlbiv.csr;

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits.  */
	value = (start & 0xffff0000) >> 12;

	wmb();
	*csr = value;
	mb();
	*csr;
}
static int
titan_query_agp(titan_pachip_port *port)
{
	union TPAchipPCTL pctl;

	pctl.pctl_q_whole = port->pctl.csr;

	return pctl.pctl_r_bits.apctl_v_agp_present;
}
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
	struct pci_controller *hose;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * This is for userland consumption.  The 40-bit PIO bias that we
	 * use in the kernel through KSEG doesn't work in the page table
	 * based user mappings. (43-bit KSEG sign extends the physical
	 * address from bit 40 to hit the I/O bit - mapped addresses don't).
	 * So make sure we get the 43-bit PIO bias.
	 */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

	hose->config_space_base = TITAN_CONF(index);
	hose->index = index;

	hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */
	saved_config[index].wsba[0] = port->wsba[0].csr;
	saved_config[index].wsm[0]  = port->wsm[0].csr;
	saved_config[index].tba[0]  = port->tba[0].csr;

	saved_config[index].wsba[1] = port->wsba[1].csr;
	saved_config[index].wsm[1]  = port->wsm[1].csr;
	saved_config[index].tba[1]  = port->tba[1].csr;

	saved_config[index].wsba[2] = port->wsba[2].csr;
	saved_config[index].wsm[2]  = port->wsm[2].csr;
	saved_config[index].tba[2]  = port->tba[2].csr;

	saved_config[index].wsba[3] = port->wsba[3].csr;
	saved_config[index].wsm[3]  = port->wsm[3].csr;
	saved_config[index].tba[3]  = port->tba[3].csr;
	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 on Titan is Scatter-Gather ONLY.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather 1GB at 3GB
	 */
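	/*
	 * Programming sketch (an inference, not part of the original
	 * comment): each window is described by a base (wsba), a size
	 * mask (wsm) and a translated base (tba).  The low bit ORed into
	 * wsba ("| 1") enables the window; the extra bit in "| 3" on the
	 * two scatter-gather windows presumably selects SG mode, with tba
	 * then pointing at the arena's PTE page.
	 */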
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
				       SMP_CACHE_BYTES);
	hose->sg_isa->align_entry = 8; /* 64KB for ISA */

	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000,
				       SMP_CACHE_BYTES);
	hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */

	port->wsba[0].csr = hose->sg_isa->dma_base | 3;
	port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
	port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);

	port->wsba[1].csr = __direct_map_base | 1;
	port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
	port->tba[1].csr  = 0;

	port->wsba[2].csr = hose->sg_pci->dma_base | 3;
	port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
	port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);

	port->wsba[3].csr = 0;

	/* Enable the Monster Window to make DAC pci64 possible.  */
	port->pctl.csr |= pctl_m_mwin;

	/*
	 * If it's an AGP port, initialize agplastwr.
	 */
	if (titan_query_agp(port))
		port->port_specific.a.agplastwr.csr = __direct_map_base;

	titan_pci_tbi(hose, 0, -1);
}
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	/* Init the ports in hose order... */
	titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
	if (titan_pchip1_present)
		titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
	titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
	if (titan_pchip1_present)
		titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
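/*
 * Hose numbering recap: the g_ports become hoses 0 (pachip0) and 1
 * (pachip1), the a_ports become hoses 2 and 3.  titan_pci_tbi() above
 * relies on this encoding: hose->index & 1 selects the pachip and
 * hose->index & 2 selects the a_port.
 */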
void __init
titan_init_arch(void)
{
#if 0
	printk("%s: titan_init_arch()\n", __func__);
	printk("%s: CChip registers:\n", __func__);
	printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
	printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
	printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
	printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
	printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
	printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
	printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
	printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);

	printk("%s: DChip registers:\n", __func__);
	printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
	printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
	printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
#endif

	boot_cpuid = __hard_smp_processor_id();

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;
	iomem_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Init the PA chip(s).  */
	titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

	/* Check for graphic console location (if any).  */
	find_console_vga_hose();
}
static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
	port->wsba[0].csr = saved_config[index].wsba[0];
	port->wsm[0].csr  = saved_config[index].wsm[0];
	port->tba[0].csr  = saved_config[index].tba[0];

	port->wsba[1].csr = saved_config[index].wsba[1];
	port->wsm[1].csr  = saved_config[index].wsm[1];
	port->tba[1].csr  = saved_config[index].tba[1];

	port->wsba[2].csr = saved_config[index].wsba[2];
	port->wsm[2].csr  = saved_config[index].wsm[2];
	port->tba[2].csr  = saved_config[index].tba[2];

	port->wsba[3].csr = saved_config[index].wsba[3];
	port->wsm[3].csr  = saved_config[index].wsm[3];
	port->tba[3].csr  = saved_config[index].tba[3];
}
static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	if (titan_pchip1_present) {
		titan_kill_one_pachip_port(&pachip1->g_port, 1);
		titan_kill_one_pachip_port(&pachip1->a_port, 3);
	}
	titan_kill_one_pachip_port(&pachip0->g_port, 0);
	titan_kill_one_pachip_port(&pachip0->a_port, 2);
}
void
titan_kill_arch(int mode)
{
	titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
/*
 * IO map support.
 */
void __iomem *
titan_ioportmap(unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)(addr + TITAN_IO_BIAS);
}
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
	unsigned long baddr = addr & ~TITAN_HOSE_MASK;
	unsigned long last = baddr + size - 1;
	struct pci_controller *hose;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

#ifdef CONFIG_VGA_HOSE
	/*
	 * Adjust the address and hose, if necessary.
	 */
	if (pci_vga_hose && __is_mem_vga(addr)) {
		h = pci_vga_hose->index;
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == h)
			break;
	if (!hose)
		return NULL;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
		return (void __iomem *) vaddr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area) {
			printk("ioremap failed... no vm_area...\n");
			return NULL;
		}

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to remap_area_pages...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
		return (void __iomem *) vaddr;
	}

	/* Assume a legacy (read: VGA) address, and return appropriately. */
	return (void __iomem *)(addr + TITAN_MEM_BIAS);
}
void
titan_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}
int
titan_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0x100000000UL) == 0;
}
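/*
 * Note on the 0x100000000UL test above (an observation about the
 * TITAN_IO_BIAS/TITAN_MEM_BIAS layout, not stated in the original):
 * within a hose's KSEG-mapped space, port I/O addresses land in the
 * upper 4GB half and so have bit 32 set, while dense memory addresses
 * do not, which is why a clear bit 32 is treated as MMIO here.
 */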
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioportmap);
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif
/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct titan_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};
static int
titan_agp_setup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
	if (aper == NULL)
		return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);
	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user.  */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it.  */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec).  */
	udelay(100);

	return 0;
}
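/*
 * Arithmetic note on the wait above: 5000 cycles at 66 MHz is roughly
 * 76 microseconds, so a 100 microsecond delay comfortably satisfies
 * the Titan spec requirement.
 */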
static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}
static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}
static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}
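/*
 * PTE format note (inferred from the translate routine above and the
 * "make it a true pfn" shift in titan_ioremap()): bit 0 of an arena
 * PTE is the valid bit and the remaining bits hold the page frame
 * number, so a valid entry translates as (pte >> 1) << PAGE_SHIFT.
 */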
struct alpha_agp_ops titan_agp_ops =
{
	.setup		= titan_agp_setup,
	.cleanup	= titan_agp_cleanup,
	.configure	= titan_agp_configure,
	.bind		= titan_agp_bind_memory,
	.unbind		= titan_agp_unbind_memory,
	.translate	= titan_agp_translate
};
alpha_agp_info *
titan_agp_info(void)
{
	alpha_agp_info *agp;
	struct pci_controller *hose;
	titan_pachip_port *port;
	int hosenum = -1;
	union TPAchipPCTL pctl;

	/*
	 * Find the AGP port.
	 */
	port = &TITAN_pachip0->a_port;
	if (titan_query_agp(port))
		hosenum = 2;
	if (hosenum < 0 &&
	    titan_pchip1_present &&
	    titan_query_agp(port = &TITAN_pachip1->a_port))
		hosenum = 3;

	/*
	 * Find the hose the port is on.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == hosenum)
			break;

	if (!hose || !hose->sg_pci)
		return NULL;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = port;
	agp->ops = &titan_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 *
	 * FIXME - should we go ahead and allocate it here?
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 */
	agp->capability.lw = 0;
	agp->capability.bits.rate = 3;	/* 2x, 1x */
	agp->capability.bits.sba = 1;
	agp->capability.bits.rq = 7;	/* 8 - 1 */

	/*
	 * Mode.
	 */
	pctl.pctl_q_whole = port->pctl.csr;
	agp->mode.lw = 0;
	agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
	agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
	agp->mode.bits.rq = 7;	/* RQ Depth? */
	agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;

	return agp;
}