/*
 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
 * the "Intel 460GTX Chipset Software Developer's Manual":
 * http://developer.intel.com/design/itanium/downloads/24870401s.htm
 */
/*
 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
 * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>

#include "agp.h"
#define INTEL_I460_BAPBASE		0x98
#define INTEL_I460_GXBCTL		0xa0
#define INTEL_I460_AGPSIZ		0xa2
#define INTEL_I460_ATTBASE		0xfe200000
#define INTEL_I460_GATT_VALID		(1UL << 24)
#define INTEL_I460_GATT_COHERENT	(1UL << 25)
/*
 * The i460 can operate with large (4MB) pages, but there is no sane way to support this
 * within the current kernel/DRM environment, so we disable the relevant code for now.
 * See also comments in ia64_alloc_page()...
 */
#define I460_LARGE_IO_PAGES	0
#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT	i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT	12
#endif
#define I460_IOPAGES_PER_KPAGE	(PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE	(1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE	(1 << 4)
#define I460_BAPBASE_ENABLE	(1 << 3)
#define I460_AGPSIZ_MASK	0x7
#define I460_4M_PS		(1 << 1)
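/*
 * Illustrative note (PAGE_SIZE is config-dependent; a 16KB kernel page size is
 * assumed here purely for the arithmetic): with the default 4KB GART pages,
 * I460_IOPAGES_PER_KPAGE = 16384 >> 12 = 4, i.e. each kernel page is covered by
 * four consecutive GATT entries.  With 4MB GART pages enabled instead,
 * I460_KPAGES_PER_IOPAGE = 1 << (22 - 14) = 256 kernel pages per GART page.
 * Only one of the two ratios is meaningful at a time, depending on which page
 * size is larger.
 */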
/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG		(1UL << 0)
#define I460_GXBCTL_BWC		(1UL << 2)
/*
 * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
 * gatt_table and gatt_table_real pointers a "void *"...
 */
#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
/*
 * The 460 spec says we have to read the last location written to make sure that all
 * writes have taken effect
 */
#define WR_FLUSH_GATT(index)	RD_GATT(index)
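/*
 * Usage sketch (illustrative only; it mirrors what i460_create_gatt_table does
 * below): program a run of entries with WR_GATT() and then read back the last
 * slot written so the posted writes to the GATT SRAMs are known to have landed:
 *
 *	for (i = 0; i < n; i++)
 *		WR_GATT(i, 0);
 *	WR_FLUSH_GATT(n - 1);
 */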
static struct {
	void *gatt;			/* ioremap'd GATT area */

	/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
	u8 io_page_shift;

	/* BIOS configures chipset to one of 2 possible apbase values: */
	u8 dynamic_apbase;

	/* structure for tracking partial use of 4MB GART pages: */
	struct lp_desc {
		unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
		int refcount;			/* number of kernel pages using the large page */
		u64 paddr;			/* physical address of large page */
	} *lp_desc;
} i460;
static const struct aper_size_info_8 i460_sizes[3] =
{
	/*
	 * The 32GB aperture is only available with a 4M GART page size.  Due to the
	 * dynamic GART page size, we can't figure out page_order or num_entries until
	 * runtime.
	 */
	{32768, 0, 0, 4},
	{1024, 0, 0, 2},
	{256, 0, 0, 1}
};
static struct gatt_mask i460_masks[] =
{
	{
		.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
		.type = 0
	}
};
static int i460_fetch_size (void)
{
	int i;
	u8 temp;
	struct aper_size_info_8 *values;

	/* Determine the GART page size */
	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
	i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
	pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);

	if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
		printk(KERN_ERR PFX
			"I/O (GART) page-size %luKB doesn't match expected "
				"size %luKB\n",
			1UL << (i460.io_page_shift - 10),
			1UL << (I460_IO_PAGE_SHIFT));
		return 0;
	}

	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);

	/* Exit now if the IO drivers for the GART SRAMS are turned off */
	if (temp & I460_SRAM_IO_DISABLE) {
		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
		printk(KERN_ERR PFX "AGPGART operation not possible\n");
		return 0;
	}

	/* Make sure we don't try to create a 2^23 entry GATT */
	if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
		return 0;
	}

	/* Determine the proper APBASE register */
	if (temp & I460_BAPBASE_ENABLE)
		i460.dynamic_apbase = INTEL_I460_BAPBASE;
	else
		i460.dynamic_apbase = AGP_APBASE;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/*
		 * Dynamically calculate the proper num_entries and page_order values for
		 * the defined aperture sizes.  Take care not to shift off the end of
		 * values[i].size.
		 */
		values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
		values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
	}

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/* Neglect control bits when matching up size_value */
		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
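/*
 * Worked example (illustrative; assumes the default 4KB GART page size and a
 * 16KB kernel PAGE_SIZE, which is config-dependent): for the 256MB aperture,
 * num_entries = (256 << 8) >> (12 - 12) = 65536 GATT entries, so the GATT itself
 * occupies 65536 * sizeof(u32) = 256KB and page_order = ilog2(262144 >> 14) = 4,
 * i.e. 16 kernel pages.  That matches the PAGE_SIZE << page_order length that
 * i460_create_gatt_table() passes to ioremap() below.
 */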
/* There isn't anything to do here since 460 has no GART TLB. */
static void i460_tlb_flush (struct agp_memory *mem)
{
	return;
}
/*
 * This utility function is needed to prevent corruption of the control bits
 * which are stored along with the aperture size in 460's AGPSIZ register
 */
static void i460_write_agpsiz (u8 size_value)
{
	u8 temp;

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
}
static void i460_cleanup (void)
{
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	i460_write_agpsiz(previous_size->size_value);

	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
		kfree(i460.lp_desc);
}
static int i460_configure (void)
{
	union {
		u32 small[2];
		u64 large;
	} temp;
	size_t size;
	u8 scratch;
	struct aper_size_info_8 *current_size;

	temp.large = 0;

	current_size = A_SIZE_8(agp_bridge->current_size);
	i460_write_agpsiz(current_size->size_value);

	/*
	 * Do the necessary rigmarole to read all eight bytes of APBASE.
	 * This has to be done since the AGP aperture can be above 4GB on
	 * 460 based systems.
	 */
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));

	/* Clear BAR control bits */
	agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);

	/*
	 * Initialize partial allocation trackers if a GART page is bigger than a kernel
	 * page.
	 */
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
		size = current_size->num_entries * sizeof(i460.lp_desc[0]);
		i460.lp_desc = kzalloc(size, GFP_KERNEL);
		if (!i460.lp_desc)
			return -ENOMEM;
	}
	return 0;
}
static int i460_create_gatt_table (struct agp_bridge_data *bridge)
{
	int page_order, num_entries, i;
	void *temp;

	/*
	 * Load up the fixed address of the GART SRAMS which hold our GATT table.
	 */
	temp = agp_bridge->current_size;
	page_order = A_SIZE_8(temp)->page_order;
	num_entries = A_SIZE_8(temp)->num_entries;

	i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
	if (!i460.gatt) {
		printk(KERN_ERR PFX "ioremap failed\n");
		return -ENOMEM;
	}

	/* These are no good, they should be removed from the agp_bridge structure... */
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	for (i = 0; i < num_entries; ++i)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(i - 1);
	return 0;
}
static int i460_free_gatt_table (struct agp_bridge_data *bridge)
{
	int num_entries, i;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	for (i = 0; i < num_entries; ++i)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(num_entries - 1);

	iounmap(i460.gatt);
	return 0;
}
/*
 * The following functions are called when the I/O (GART) page size is smaller than
 * PAGE_SIZE.
 */
static int i460_insert_memory_small_io_page (struct agp_memory *mem,
					     off_t pg_start, int type)
{
	unsigned long paddr, io_pg_start, io_page_size;
	int i, j, k, num_entries;
	void *temp;

	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
		 mem, pg_start, type, mem->memory[0]);

	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
		return -EINVAL;

	io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	j = io_pg_start;
	while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
			pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
				 j, RD_GATT(j));
			return -EBUSY;
		}
		j++;
	}

	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		paddr = mem->memory[i];
		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
			WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
								   paddr, mem->type));
	}
	WR_FLUSH_GATT(j - 1);
	return 0;
}
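/*
 * Illustrative note (assumes a 16KB kernel PAGE_SIZE, which is config-dependent):
 * with 4KB GART pages, I460_IOPAGES_PER_KPAGE is 4, so binding a single kernel page
 * at pg_start = 2 programs GATT entries 8..11, one per 4KB sub-page of that kernel
 * page, and the final WR_FLUSH_GATT() reads back entry 11.
 */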
static int i460_remove_memory_small_io_page(struct agp_memory *mem,
					    off_t pg_start, int type)
{
	int i;

	pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
		 mem, pg_start, type);

	pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

	for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(i - 1);
	return 0;
}
#if I460_LARGE_IO_PAGES

/*
 * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
 *
 * This situation is interesting since AGP memory allocations that are smaller than a
 * single GART page are possible.  The i460.lp_desc array tracks partial allocation of the
 * large GART pages to work around this issue.
 *
 * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
 * pg_num.  i460.lp_desc[pg_num].paddr is the physical address of the large page and
 * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
 */
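/*
 * Worked example (illustrative; assumes 4MB GART pages and a 16KB kernel PAGE_SIZE,
 * so I460_KPAGES_PER_IOPAGE == 256): binding 3 kernel pages at pg_start = 510 spans
 * lp_desc[510 / 256] = lp_desc[1] through lp_desc[512 / 256] = lp_desc[2], with
 * start_offset = 510 % 256 = 254 and end_offset = 512 % 256 = 0, i.e. the last two
 * slots of large page 1 plus the first slot of large page 2.
 */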
static int i460_alloc_large_page (struct lp_desc *lp)
{
	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
	size_t map_size;
	void *lpage;

	lpage = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!lpage) {
		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
		return -ENOMEM;
	}

	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
	lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
	if (!lp->alloced_map) {
		free_pages((unsigned long) lpage, order);
		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
		return -ENOMEM;
	}

	lp->paddr = virt_to_gart(lpage);
	lp->refcount = 0;
	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
	return 0;
}
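/*
 * Illustrative arithmetic (assumes I460_KPAGES_PER_IOPAGE == 256 and BITS_PER_LONG
 * == 64): map_size rounds the bitmap up to a whole number of longs, so
 * ((256 + 63) & -64) / 8 = 256 / 8 = 32 bytes, i.e. four 64-bit words of bitmap
 * per 4MB GART page.
 */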
static void i460_free_large_page (struct lp_desc *lp)
{
	kfree(lp->alloced_map);
	lp->alloced_map = NULL;

	free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
static int i460_insert_memory_large_io_page (struct agp_memory *mem,
					     off_t pg_start, int type)
{
	int i, start_offset, end_offset, idx, pg, num_entries;
	struct lp_desc *start, *end, *lp;
	void *temp;

	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
		return -EINVAL;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	/* Figure out what pg_start means in terms of our large GART pages */
	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

	if (end > i460.lp_desc + num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	/* Check if the requested region of the aperture is free */
	for (lp = start; lp <= end; ++lp) {
		if (!lp->alloced_map)
			continue;	/* OK, the entire large page is available... */

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++)
		{
			if (test_bit(idx, lp->alloced_map))
				return -EBUSY;
		}
	}

	for (lp = start, i = 0; lp <= end; ++lp) {
		if (!lp->alloced_map) {
			/* Allocate new GART pages... */
			if (i460_alloc_large_page(lp) < 0)
				return -ENOMEM;
			pg = lp - i460.lp_desc;
			WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
								    lp->paddr, 0));
			WR_FLUSH_GATT(pg);
		}

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++, i++)
		{
			mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
			__set_bit(idx, lp->alloced_map);
			++lp->refcount;
		}
	}
	return 0;
}
static int i460_remove_memory_large_io_page (struct agp_memory *mem,
					     off_t pg_start, int type)
{
	int i, pg, start_offset, end_offset, idx, num_entries;
	struct lp_desc *start, *end, *lp;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	/* Figure out what pg_start means in terms of our large GART pages */
	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

	for (i = 0, lp = start; lp <= end; ++lp) {
		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++, i++)
		{
			mem->memory[i] = 0;
			__clear_bit(idx, lp->alloced_map);
			--lp->refcount;
		}

		/* Free GART pages if they are unused */
		if (lp->refcount == 0) {
			pg = lp - i460.lp_desc;
			WR_GATT(pg, 0);
			WR_FLUSH_GATT(pg);
			i460_free_large_page(lp);
		}
	}
	return 0;
}
/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
static int i460_insert_memory (struct agp_memory *mem,
			       off_t pg_start, int type)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
		return i460_insert_memory_small_io_page(mem, pg_start, type);
	else
		return i460_insert_memory_large_io_page(mem, pg_start, type);
}
static int i460_remove_memory (struct agp_memory *mem,
			       off_t pg_start, int type)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
		return i460_remove_memory_small_io_page(mem, pg_start, type);
	else
		return i460_remove_memory_large_io_page(mem, pg_start, type);
}
/*
 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
 * allocate memory until we know where it is to be bound in the aperture (a
 * multi-kernel-page alloc might fit inside of an already allocated GART page).
 *
 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
 * (I don't think current drivers do)...
 */
static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
	void *page;

	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
		page = agp_generic_alloc_page(agp_bridge);
	} else
		/* Returning NULL would cause problems */
		/* AK: really dubious code. */
		page = (void *)~0UL;
	return page;
}
static void i460_destroy_page (void *page, int flags)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
		agp_generic_destroy_page(page, flags);
	}
}

#endif /* I460_LARGE_IO_PAGES */
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
				       unsigned long addr, int type)
{
	/* Make sure the returned address is a valid GATT entry */
	return bridge->driver->masks[0].mask
		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
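/*
 * Illustrative example (default 4KB GART pages): for addr = 0x12345000 the low
 * twelve offset bits are masked off, the result is ANDed with 0xfffff000 and
 * shifted right by 12, giving page frame 0x12345; OR-ing in masks[0].mask then
 * sets the VALID (bit 24) and COHERENT (bit 25) bits, so the 32-bit GATT entry
 * is 0x03012345.
 */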
const struct agp_bridge_driver intel_i460_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= i460_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 3,
	.configure		= i460_configure,
	.fetch_size		= i460_fetch_size,
	.cleanup		= i460_cleanup,
	.tlb_flush		= i460_tlb_flush,
	.mask_memory		= i460_mask_memory,
	.masks			= i460_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= i460_create_gatt_table,
	.free_gatt_table	= i460_free_gatt_table,
#if I460_LARGE_IO_PAGES
	.insert_memory		= i460_insert_memory,
	.remove_memory		= i460_remove_memory,
	.agp_alloc_page		= i460_alloc_page,
	.agp_destroy_page	= i460_destroy_page,
#else
	.insert_memory		= i460_insert_memory_small_io_page,
	.remove_memory		= i460_remove_memory_small_io_page,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
#endif
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.cant_use_aperture	= 1,
};
static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
					  const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &intel_i460_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
static struct pci_device_id agp_intel_i460_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.device		= PCI_DEVICE_ID_INTEL_84460GX,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
static struct pci_driver agp_intel_i460_pci_driver = {
	.name		= "agpgart-intel-i460",
	.id_table	= agp_intel_i460_pci_table,
	.probe		= agp_intel_i460_probe,
	.remove		= __devexit_p(agp_intel_i460_remove),
};
static int __init agp_intel_i460_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_intel_i460_pci_driver);
}
static void __exit agp_intel_i460_cleanup(void)
{
	pci_unregister_driver(&agp_intel_i460_pci_driver);
}
module_init(agp_intel_i460_init);
module_exit(agp_intel_i460_cleanup);
MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
MODULE_LICENSE("GPL and additional rights");