/*
 * AMD K7 AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
#define AMD_MMBASE	0x14
#define AMD_APSIZE	0xac
#define AMD_MODECNTL	0xb0
#define AMD_MODECNTL2	0xb2
#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */
static struct pci_device_id agp_amdk7_pci_table[];

struct amd_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};
static struct _amd_irongate_private {
	volatile u8 __iomem *registers;
	struct amd_page_map **gatt_pages;
	int num_tables;
} amd_irongate_private;
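/*
 * Note (added for clarity): the Irongate GART used here is two-level.
 * gatt_pages holds one amd_page_map per 4 MB of aperture (the second-level
 * tables), and amd_create_gatt_table() below builds the single page
 * directory that points at them.
 */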
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	/* Point every entry of the new table at the scratch page so that
	 * stray translations hit a harmless page. */
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
static void amd_free_page_map(struct amd_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
static void amd_free_gatt_pages(void)
{
	int i;
	struct amd_page_map **tables;
	struct amd_page_map *entry;

	tables = amd_irongate_private.gatt_pages;
	for (i = 0; i < amd_irongate_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL)
				amd_free_page_map(entry);
			kfree(entry);
		}
	}
	kfree(tables);
	amd_irongate_private.gatt_pages = NULL;
}
static int amd_create_gatt_pages(int nr_tables)
{
	struct amd_page_map **tables;
	struct amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
		tables[i] = entry;
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		retval = amd_create_page_map(entry);
		if (retval != 0)
			break;
	}
	amd_irongate_private.num_tables = i;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0)
		amd_free_gatt_pages();

	return retval;
}
/* Since we don't need contiguous memory we just try
 * to get the gatt table once
 */

#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
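/*
 * Worked example (added for clarity, derived from the macros above): each
 * page-directory entry covers 4 MB of aperture (1024 GATT entries * 4 KB),
 * hence the >> 22.  For a bus address 0x00501000 bytes above a 4 MB-aligned
 * aperture base, GET_PAGE_DIR_IDX yields 1 (0x00501000 >> 22) and
 * GET_GATT_OFF yields 0x101 ((0x00501000 & 0x003ff000) >> 12), i.e. entry
 * 257 within the second GATT page.
 */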
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the Alpha, because it's
	 * used to program the AGP master, not the CPU.
	 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		/* Low bit marks the page-directory entry valid. */
		writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}
static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct amd_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	amd_free_gatt_pages();
	amd_free_page_map(&page_dir);
	return 0;
}
static int amd_irongate_fetch_size(void)
{
	int i;
	u32 temp;
	struct aper_size_info_lvl2 *values;

	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (temp & 0x0000000e);
	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
static int amd_irongate_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!amd_irongate_private.registers)
		return -ENOMEM;

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write out the size register */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting.*/

	return 0;
}
static void amd_irongate_cleanup(void)
{
	struct aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
	iounmap((void __iomem *) amd_irongate_private.registers);
}
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
}
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_generic_mask_memory(agp_bridge,
			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}
	amd_irongate_tlbflush(mem);
	return 0;
}
static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}

	amd_irongate_tlbflush(mem);
	return 0;
}
static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};
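/*
 * Note (added for clarity): each row above is {aperture size in MB, number
 * of 4 KB GATT entries, APSIZE size_value}; e.g. 2048 MB / 4 KB per page
 * = 524288 entries for the largest aperture.
 */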
static const struct gatt_mask amd_irongate_masks[] =
{
	{.mask = 1, .type = 0}
};
static const struct agp_bridge_driver amd_irongate_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_irongate_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_irongate_configure,
	.fetch_size		= amd_irongate_fetch_size,
	.cleanup		= amd_irongate_cleanup,
	.tlb_flush		= amd_irongate_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= amd_irongate_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= amd_create_gatt_table,
	.free_gatt_table	= amd_free_gatt_table,
	.insert_memory		= amd_insert_memory,
	.remove_memory		= amd_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_7006,
		.chipset_name	= "Irongate",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700E,
		.chipset_name	= "761",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700C,
		.chipset_name	= "760MP",
	},
	{ }, /* dummy final entry, always present */
};
static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	j = ent - agp_amdk7_pci_table;
	printk(KERN_INFO PFX "Detected AMD %s chipset\n",
	       amd_agp_device_ids[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &amd_irongate_driver;
	bridge->dev_private_data = &amd_irongate_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* 751 Errata (22564_B-1.PDF)
	   erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
	   system controller may experience noise due to strong drive strengths
	 */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
		struct pci_dev *gfxcard = NULL;

		cap_ptr = 0;
		while (!cap_ptr) {
			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
			if (!gfxcard) {
				printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
				return -ENODEV;
			}
			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
			if (!cap_ptr) {
				pci_dev_put(gfxcard);
				continue;
			}
		}

		/* With so many variants of NVidia cards, it's simpler just
		   to blacklist them all, and then whitelist them as needed
		   (if necessary at all). */
		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
		}
		pci_dev_put(gfxcard);
	}

	/* 761 Errata (23613_F.pdf)
	 * Revisions B0/B1 were a disaster.
	 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
	 * erratum 45: Timing problem prevents fast writes -- Disable fast write.
	 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
	 * With this lot disabled, we should prevent lockups. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
		if (pdev->revision == 0x10 || pdev->revision == 0x11) {
			agp_bridge->flags = AGP_ERRATA_FASTWRITES;
			agp_bridge->flags |= AGP_ERRATA_SBA;
			agp_bridge->flags |= AGP_ERRATA_1X;
			printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
		}
	}

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			      bridge->capndx+PCI_AGP_STATUS,
			      &bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
/* must be the same order as name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_7006,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700E,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700C,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
static struct pci_driver agp_amdk7_pci_driver = {
	.name		= "agpgart-amdk7",
	.id_table	= agp_amdk7_pci_table,
	.probe		= agp_amdk7_probe,
	.remove		= agp_amdk7_remove,
};
static int __init agp_amdk7_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_amdk7_pci_driver);
}

static void __exit agp_amdk7_cleanup(void)
{
	pci_unregister_driver(&agp_amdk7_pci_driver);
}

module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);

MODULE_LICENSE("GPL and additional rights");