/*
 *	linux/arch/alpha/kernel/core_titan.c
 *
 * Code common to all TITAN core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_titan.h>
#undef __EXTERN_INLINE

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"
/* Save Titan configuration data as the console had it set up.  */

struct
{
        unsigned long wsba[4];
        unsigned long wsm[4];
        unsigned long tba[4];
} saved_config[4] __attribute__((common));
/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif
/*
 * Routines to access TIG registers.
 */
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
        return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}

static inline u8
titan_read_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        return (u8)(*tig_addr & 0xff);
}

static inline void
titan_write_tig(int offset, u8 value)
{
        volatile unsigned long *tig_addr = mk_tig_addr(offset);
        *tig_addr = (unsigned long)value;
}
/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is not safe to have concurrent
 * invocations of the configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 *
 *	A worked example of this packing follows mk_conf_addr() below.
 */
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
             unsigned long *pci_addr, unsigned char *type1)
{
        struct pci_controller *hose = pbus->sysdata;
        unsigned long addr;
        u8 bus = pbus->number;

        DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
                 "pci_addr=0x%p, type1=0x%p)\n",
                 bus, device_fn, where, pci_addr, type1));

        if (!pbus->parent) /* No parent means peer PCI bus. */
                bus = 0;
        *type1 = (bus != 0);

        addr = (bus << 16) | (device_fn << 8) | where;
        addr |= hose->config_space_base;
        *pci_addr = addr;
        DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
        return 0;
}
static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                  int size, u32 *value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        switch (size) {
        case 1:
                *value = __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                *value = __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *value = *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                   int size, u32 value)
{
        unsigned long addr;
        unsigned char type1;

        if (mk_conf_addr(bus, devfn, where, &addr, &type1))
                return PCIBIOS_DEVICE_NOT_FOUND;

        /* The read-back after each store forces the write to post
           before we return.  */
        switch (size) {
        case 1:
                __kernel_stb(value, *(vucp)addr);
                mb();
                __kernel_ldbu(*(vucp)addr);
                break;
        case 2:
                __kernel_stw(value, *(vusp)addr);
                mb();
                __kernel_ldwu(*(vusp)addr);
                break;
        case 4:
                *(vuip)addr = value;
                mb();
                *(vuip)addr;
                break;
        }

        return PCIBIOS_SUCCESSFUL;
}
struct pci_ops titan_pci_ops =
{
        .read =		titan_read_config,
        .write =	titan_write_config,
};
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
        titan_pachip *pachip =
          (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
        titan_pachip_port *port;
        volatile unsigned long *csr;
        unsigned long value;

        /* Get the right port on the pachip.  */
        port = &pachip->g_port;
        if (hose->index & 2)
                port = &pachip->a_port;

        /* We can invalidate up to 8 tlb entries in a go.  The flush
           matches against <31:16> in the pci address.
           Note that gtlbi* and atlbi* are in the same place in the g_port
           and a_port, respectively, so the g_port offset can be used
           even if hose is an a_port.  (The tag computation is illustrated
           by the sketch after this function.)  */
        csr = &port->port_specific.g.gtlbia.csr;
        if (((start ^ end) & 0xffff0000) == 0)
                csr = &port->port_specific.g.gtlbiv.csr;

        /* For TBIA, it doesn't matter what value we write.  For TBI,
           it's the shifted tag bits.  */
        value = (start & 0xffff0000) >> 12;

        wmb();
        *csr = value;
        mb();
        *csr;
}
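/*
 * Illustrative sketch only -- not part of the original driver.  A TBI match
 * tag is just PCI address bits <31:16> moved down into the TLBIV tag field,
 * which is what titan_pci_tbi() computes above.
 */
static inline unsigned long
example_titan_tbi_tag(dma_addr_t start)
{
        return (start & 0xffff0000) >> 12;
}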
static int
titan_query_agp(titan_pachip_port *port)
{
        union TPAchipPCTL pctl;

        /* The AGP-present flag lives in the port control register.  */
        pctl.pctl_q_whole = port->pctl.csr;

        return pctl.pctl_r_bits.apctl_v_agp_present;
}
static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
        struct pci_controller *hose;

        hose = alloc_pci_controller();
        hose->io_space = alloc_resource();
        hose->mem_space = alloc_resource();

        /*
         * This is for userland consumption.  The 40-bit PIO bias that we
         * use in the kernel through KSEG doesn't work in the page table
         * based user mappings.  (43-bit KSEG sign extends the physical
         * address from bit 40 to hit the I/O bit - mapped addresses don't).
         * So make sure we get the 43-bit PIO bias.
         */
        hose->sparse_mem_base = 0;
        hose->sparse_io_base = 0;
        hose->dense_mem_base
          = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
        hose->dense_io_base
          = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

        hose->config_space_base = TITAN_CONF(index);
        hose->index = index;

        hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
        hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
        hose->io_space->name = pci_io_names[index];
        hose->io_space->flags = IORESOURCE_IO;

        hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
        hose->mem_space->end = hose->mem_space->start + 0xffffffff;
        hose->mem_space->name = pci_mem_names[index];
        hose->mem_space->flags = IORESOURCE_MEM;

        if (request_resource(&ioport_resource, hose->io_space) < 0)
                printk(KERN_ERR "Failed to request IO on hose %d\n", index);
        if (request_resource(&iomem_resource, hose->mem_space) < 0)
                printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

        /*
         * Save the existing PCI window translations.  SRM will
         * need them when we go to reboot.
         */
        saved_config[index].wsba[0] = port->wsba[0].csr;
        saved_config[index].wsm[0]  = port->wsm[0].csr;
        saved_config[index].tba[0]  = port->tba[0].csr;

        saved_config[index].wsba[1] = port->wsba[1].csr;
        saved_config[index].wsm[1]  = port->wsm[1].csr;
        saved_config[index].tba[1]  = port->tba[1].csr;

        saved_config[index].wsba[2] = port->wsba[2].csr;
        saved_config[index].wsm[2]  = port->wsm[2].csr;
        saved_config[index].tba[2]  = port->tba[2].csr;

        saved_config[index].wsba[3] = port->wsba[3].csr;
        saved_config[index].wsm[3]  = port->wsm[3].csr;
        saved_config[index].tba[3]  = port->tba[3].csr;

        /*
         * Set up the PCI to main memory translation windows.
         * (The register encoding is illustrated by the sketch after
         * this function.)
         *
         * Note: Window 3 on Titan is Scatter-Gather ONLY.
         *
         * Window 0 is scatter-gather 8MB at 8MB (for isa)
         * Window 1 is direct access 1GB at 2GB
         * Window 2 is scatter-gather 1GB at 3GB
         */
        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_isa->align_entry = 8;	/* 64KB for ISA */

        hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
        hose->sg_pci->align_entry = 4;	/* Titan caches 4 PTEs at a time */

        port->wsba[0].csr = hose->sg_isa->dma_base | 3;
        port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
        port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);

        port->wsba[1].csr = __direct_map_base | 1;
        port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
        port->tba[1].csr  = 0;

        port->wsba[2].csr = hose->sg_pci->dma_base | 3;
        port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
        port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);

        port->wsba[3].csr = 0;

        /* Enable the Monster Window to make DAC pci64 possible.  */
        port->pctl.csr |= pctl_m_mwin;

        /*
         * If it's an AGP port, initialize agplastwr.
         */
        if (titan_query_agp(port))
                port->port_specific.a.agplastwr.csr = __direct_map_base;

        titan_pci_tbi(hose, 0, -1);
}
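/*
 * Illustrative sketch only -- not part of the original driver.  It spells
 * out how the window registers written above encode a PCI-to-memory window,
 * assuming the usual Tsunami/Titan layout: WSBA holds the PCI base address
 * with bit 0 as the enable and bit 1 selecting scatter-gather, WSM masks
 * address bits <31:20>, and TBA points at the PTE array for SG windows.
 */
static inline void
example_titan_set_window(titan_pachip_port *port, int win,
                         unsigned long base, unsigned long size,
                         unsigned long pte_phys, int sg)
{
        port->wsba[win].csr = base | (sg ? 3 : 1);
        port->wsm[win].csr  = (size - 1) & 0xfff00000;
        port->tba[win].csr  = sg ? pte_phys : 0;
}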
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

        /* Init the ports in hose order... */
        titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
        if (pchip1_present)
                titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
        titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
        if (pchip1_present)
                titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}
static void __init
titan_init_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
        u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

        if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
                struct pci_controller *hose;
                int h = (pu64[30] >> 24) & 0xff;	/* console hose # */

                /*
                 * Our hose numbering matches the console's, so just find
                 * the right one...
                 */
                for (hose = hose_head; hose; hose = hose->next) {
                        if (hose->index == h) break;
                }

                if (hose) {
                        printk("Console graphics on hose %d\n", hose->index);
                        pci_vga_hose = hose;
                }
        }
#endif /* CONFIG_VGA_HOSE */
}
void __init
titan_init_arch(void)
{
#if 0
        printk("%s: titan_init_arch()\n", __FUNCTION__);
        printk("%s: CChip registers:\n", __FUNCTION__);
        printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
        printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
        printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
        printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
        printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
        printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
        printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
        printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);

        printk("%s: DChip registers:\n", __FUNCTION__);
        printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
        printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
        printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
#endif

        boot_cpuid = __hard_smp_processor_id();

        /* With multiple PCI busses, we play with I/O as physical addrs.  */
        ioport_resource.end = ~0UL;

        /* PCI DMA Direct Mapping is 1GB at 2GB.  (See the sketch after
           this function for how a physical address maps through it.)  */
        __direct_map_base = 0x80000000;
        __direct_map_size = 0x40000000;

        /* Init the PA chip(s).  */
        titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

        /* Check for graphic console location (if any).  */
        titan_init_vga_hose();
}
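/*
 * Illustrative sketch only -- not part of the original driver.  With the
 * direct-map window programmed as above (1GB of PCI space at bus address
 * 2GB, translating to physical 0), a CPU physical address below 1GB appears
 * on the PCI bus at paddr + __direct_map_base; this is the arithmetic the
 * generic alpha PCI DMA code relies on.
 */
static inline dma_addr_t
example_titan_direct_dma_addr(unsigned long paddr)
{
        return paddr + __direct_map_base;
}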
static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
        port->wsba[0].csr = saved_config[index].wsba[0];
        port->wsm[0].csr  = saved_config[index].wsm[0];
        port->tba[0].csr  = saved_config[index].tba[0];

        port->wsba[1].csr = saved_config[index].wsba[1];
        port->wsm[1].csr  = saved_config[index].wsm[1];
        port->tba[1].csr  = saved_config[index].tba[1];

        port->wsba[2].csr = saved_config[index].wsba[2];
        port->wsm[2].csr  = saved_config[index].wsm[2];
        port->tba[2].csr  = saved_config[index].tba[2];

        port->wsba[3].csr = saved_config[index].wsba[3];
        port->wsm[3].csr  = saved_config[index].wsm[3];
        port->tba[3].csr  = saved_config[index].tba[3];
}
static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
        int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

        if (pchip1_present) {
                titan_kill_one_pachip_port(&pachip1->g_port, 1);
                titan_kill_one_pachip_port(&pachip1->a_port, 3);
        }
        titan_kill_one_pachip_port(&pachip0->g_port, 0);
        titan_kill_one_pachip_port(&pachip0->a_port, 2);
}
void
titan_kill_arch(int mode)
{
        titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}
/*
 * IO map support.
 */
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
        int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
        unsigned long baddr = addr & ~TITAN_HOSE_MASK;
        unsigned long last = baddr + size - 1;
        struct pci_controller *hose;
        struct vm_struct *area;
        unsigned long vaddr;
        unsigned long *ptes;
        unsigned long pfn;

        /*
         * Adjust the address for the VGA hose, if needed.
         */
#ifdef CONFIG_VGA_HOSE
        if (pci_vga_hose && __titan_is_mem_vga(addr)) {
                h = pci_vga_hose->index;
                addr += pci_vga_hose->mem_space->start;
        }
#endif

        /*
         * Find the hose.
         */
        for (hose = hose_head; hose; hose = hose->next)
                if (hose->index == h)
                        break;
        if (!hose)
                return NULL;

        /*
         * Is it direct-mapped?
         */
        if ((baddr >= __direct_map_base) &&
            ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
                vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
                return (void __iomem *) vaddr;
        }

        /*
         * Check the scatter-gather arena.
         */
        if (hose->sg_pci &&
            baddr >= (unsigned long)hose->sg_pci->dma_base &&
            last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

                /*
                 * Adjust the limits (mappings must be page aligned).
                 */
                baddr -= hose->sg_pci->dma_base;
                last -= hose->sg_pci->dma_base;
                baddr &= PAGE_MASK;
                size = PAGE_ALIGN(last) - baddr;

                /*
                 * Map it.
                 */
                area = get_vm_area(size, VM_IOREMAP);
                if (!area)
                        return NULL;

                ptes = hose->sg_pci->ptes;
                for (vaddr = (unsigned long)area->addr;
                    baddr <= last;
                    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
                        pfn = ptes[baddr >> PAGE_SHIFT];
                        if (!(pfn & 1)) {
                                printk("ioremap failed... pte not valid...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                        pfn >>= 1;	/* make it a true pfn */

                        if (__alpha_remap_area_pages(vaddr,
                                                     pfn << PAGE_SHIFT,
                                                     PAGE_SIZE, 0)) {
                                printk("FAILED to map...\n");
                                vfree(area->addr);
                                return NULL;
                        }
                }

                flush_tlb_all();

                vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
                return (void __iomem *) vaddr;
        }

        return NULL;
}
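/*
 * Illustrative sketch only -- not part of the original driver.  It shows
 * how a flat Titan MMIO cookie splits into the hose number and per-hose
 * bus address used at the top of titan_ioremap() above.
 */
static inline int
example_titan_addr_to_hose(unsigned long addr, unsigned long *baddr)
{
        *baddr = addr & ~TITAN_HOSE_MASK;
        return (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
}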
void
titan_iounmap(volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;
        if (addr >= VMALLOC_START)
                vfree((void *)(PAGE_MASK & addr));
}
int
titan_is_mmio(const volatile void __iomem *xaddr)
{
        unsigned long addr = (unsigned long) xaddr;

        if (addr >= VMALLOC_START)
                return 1;
        else
                return (addr & 0x100000000UL) == 0;
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif
/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct titan_agp_aperture {
        struct pci_iommu_arena *arena;
        long pg_start;
        long pg_count;
};
static int
titan_agp_setup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper;

        if (!alpha_agpgart_size)
                return -ENOMEM;

        aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
        if (aper == NULL)
                return -ENOMEM;

        aper->arena = agp->hose->sg_pci;
        aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
        aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
                                       aper->pg_count - 1);
        if (aper->pg_start < 0) {
                printk(KERN_ERR "Failed to reserve AGP memory\n");
                kfree(aper);
                return -ENOMEM;
        }

        agp->aperture.bus_base =
                aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
        agp->aperture.size = aper->pg_count * PAGE_SIZE;
        agp->aperture.sysdata = aper;

        return 0;
}
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        int status;

        status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
        if (status == -EBUSY) {
                printk(KERN_WARNING
                       "Attempted to release bound AGP memory - unbinding\n");
                iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
                status = iommu_release(aper->arena, aper->pg_start,
                                       aper->pg_count);
        }
        if (status < 0)
                printk(KERN_ERR "Failed to release AGP memory\n");

        kfree(aper);
        kfree(agp);
}
static int
titan_agp_configure(alpha_agp_info *agp)
{
        union TPAchipPCTL pctl;
        titan_pachip_port *port = agp->private;
        pctl.pctl_q_whole = port->pctl.csr;

        /* Side-Band Addressing? */
        pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

        /* AGP Rate?  (See the sketch after this function for the mapping
           from AGP mode rate bits to this field.)  */
        pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
        if (agp->mode.bits.rate & 2)
                pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
        if (agp->mode.bits.rate & 4)
                pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

        /* RQ Depth? */
        pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
        pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

        /*
         * AGP Enable.
         */
        pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

        /* Tell the user.  */
        printk("Enabling AGP: %dX%s\n",
               1 << pctl.pctl_r_bits.apctl_v_agp_rate,
               pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

        /* Write it.  */
        port->pctl.csr = pctl.pctl_q_whole;

        /* And wait at least 5000 66MHz cycles (per Titan spec).  */
        udelay(100);

        return 0;
}
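/*
 * Illustrative sketch only -- not part of the original driver.  The AGP v2
 * mode word advertises rates as a bit mask (bit 0 = 1x, bit 1 = 2x,
 * bit 2 = 4x); titan_agp_configure() above maps that onto the pachip's
 * two-bit rate field, with the 4x case compiled out.
 */
static inline int
example_titan_agp_rate_field(int mode_rate_bits)
{
        if (mode_rate_bits & 2)
                return 1;	/* 2x */
        return 0;		/* 1x */
}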
static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_bind(aper->arena, aper->pg_start + pg_start,
                          mem->page_count, mem->memory);
}
static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        return iommu_unbind(aper->arena, aper->pg_start + pg_start,
                            mem->page_count);
}
static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
        struct titan_agp_aperture *aper = agp->aperture.sysdata;
        unsigned long baddr = addr - aper->arena->dma_base;
        unsigned long pte;

        if (addr < agp->aperture.bus_base ||
            addr >= agp->aperture.bus_base + agp->aperture.size) {
                printk("%s: addr out of range\n", __FUNCTION__);
                return -EINVAL;
        }

        pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
        if (!(pte & 1)) {
                printk("%s: pte not valid\n", __FUNCTION__);
                return -EINVAL;
        }

        return (pte >> 1) << PAGE_SHIFT;
}
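/*
 * Illustrative sketch only -- not part of the original driver.  Titan
 * window PTEs keep the valid flag in bit 0 and the page frame number in
 * the bits above it, so both titan_ioremap() and titan_agp_translate()
 * recover a physical address by shifting the valid bit off and shifting
 * the pfn back up by PAGE_SHIFT.
 */
static inline unsigned long
example_titan_pte_to_phys(unsigned long pte)
{
        if (!(pte & 1))		/* PTE not valid */
                return 0;
        return (pte >> 1) << PAGE_SHIFT;
}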
struct alpha_agp_ops titan_agp_ops =
{
        .setup		= titan_agp_setup,
        .cleanup	= titan_agp_cleanup,
        .configure	= titan_agp_configure,
        .bind		= titan_agp_bind_memory,
        .unbind		= titan_agp_unbind_memory,
        .translate	= titan_agp_translate
};
*hose
;
742 titan_pachip_port
*port
;
744 union TPAchipPCTL pctl
;
749 port
= &TITAN_pachip0
->a_port
;
750 if (titan_query_agp(port
))
753 titan_query_agp(port
= &TITAN_pachip1
->a_port
))
757 * Find the hose the port is on.
759 for (hose
= hose_head
; hose
; hose
= hose
->next
)
760 if (hose
->index
== hosenum
)
763 if (!hose
|| !hose
->sg_pci
)
767 * Allocate the info structure.
769 agp
= kmalloc(sizeof(*agp
), GFP_KERNEL
);
776 agp
->ops
= &titan_agp_ops
;
779 * Aperture - not configured until ops.setup().
781 * FIXME - should we go ahead and allocate it here?
783 agp
->aperture
.bus_base
= 0;
784 agp
->aperture
.size
= 0;
785 agp
->aperture
.sysdata
= NULL
;
790 agp
->capability
.lw
= 0;
791 agp
->capability
.bits
.rate
= 3; /* 2x, 1x */
792 agp
->capability
.bits
.sba
= 1;
793 agp
->capability
.bits
.rq
= 7; /* 8 - 1 */
798 pctl
.pctl_q_whole
= port
->pctl
.csr
;
800 agp
->mode
.bits
.rate
= 1 << pctl
.pctl_r_bits
.apctl_v_agp_rate
;
801 agp
->mode
.bits
.sba
= pctl
.pctl_r_bits
.apctl_v_agp_sba_en
;
802 agp
->mode
.bits
.rq
= 7; /* RQ Depth? */
803 agp
->mode
.bits
.enable
= pctl
.pctl_r_bits
.apctl_v_agp_en
;