/*
 * Shared support code for AMD K8 northbridges and derivates.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
5 #include <linux/types.h>
6 #include <linux/slab.h>
7 #include <linux/init.h>
8 #include <linux/errno.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <asm/amd_nb.h>
13 static u32
*flush_words
;
15 struct pci_device_id amd_nb_misc_ids
[] = {
16 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_K8_NB_MISC
) },
17 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_10H_NB_MISC
) },
18 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_15H_NB_MISC
) },
21 EXPORT_SYMBOL(amd_nb_misc_ids
);
23 struct amd_northbridge_info amd_northbridges
;
24 EXPORT_SYMBOL(amd_northbridges
);
26 static struct pci_dev
*next_northbridge(struct pci_dev
*dev
,
27 struct pci_device_id
*ids
)
30 dev
= pci_get_device(PCI_ANY_ID
, PCI_ANY_ID
, dev
);
33 } while (!pci_match_id(ids
, dev
));
37 int amd_cache_northbridges(void)
40 struct amd_northbridge
*nb
;
47 while ((misc
= next_northbridge(misc
, amd_nb_misc_ids
)) != NULL
)
53 nb
= kzalloc(i
* sizeof(struct amd_northbridge
), GFP_KERNEL
);
57 amd_northbridges
.nb
= nb
;
58 amd_northbridges
.num
= i
;
61 for (i
= 0; i
!= amd_nb_num(); i
++) {
62 node_to_amd_nb(i
)->misc
= misc
=
63 next_northbridge(misc
, amd_nb_misc_ids
);
66 /* some CPU families (e.g. family 0x11) do not support GART */
67 if (boot_cpu_data
.x86
== 0xf || boot_cpu_data
.x86
== 0x10 ||
68 boot_cpu_data
.x86
== 0x15)
69 amd_northbridges
.flags
|= AMD_NB_GART
;
72 * Some CPU families support L3 Cache Index Disable. There are some
73 * limitations because of E382 and E388 on family 0x10.
75 if (boot_cpu_data
.x86
== 0x10 &&
76 boot_cpu_data
.x86_model
>= 0x8 &&
77 (boot_cpu_data
.x86_model
> 0x9 ||
78 boot_cpu_data
.x86_mask
>= 0x1))
79 amd_northbridges
.flags
|= AMD_NB_L3_INDEX_DISABLE
;
83 EXPORT_SYMBOL_GPL(amd_cache_northbridges
);
85 /* Ignores subdevice/subvendor but as far as I can figure out
86 they're useless anyways */
87 int __init
early_is_amd_nb(u32 device
)
89 struct pci_device_id
*id
;
90 u32 vendor
= device
& 0xffff;
92 for (id
= amd_nb_misc_ids
; id
->vendor
; id
++)
93 if (vendor
== id
->vendor
&& device
== id
->device
)
98 int amd_cache_gart(void)
102 if (!amd_nb_has_feature(AMD_NB_GART
))
105 flush_words
= kmalloc(amd_nb_num() * sizeof(u32
), GFP_KERNEL
);
107 amd_northbridges
.flags
&= ~AMD_NB_GART
;
111 for (i
= 0; i
!= amd_nb_num(); i
++)
112 pci_read_config_dword(node_to_amd_nb(i
)->misc
, 0x9c,
118 void amd_flush_garts(void)
122 static DEFINE_SPINLOCK(gart_lock
);
124 if (!amd_nb_has_feature(AMD_NB_GART
))
127 /* Avoid races between AGP and IOMMU. In theory it's not needed
128 but I'm not sure if the hardware won't lose flush requests
129 when another is pending. This whole thing is so expensive anyways
130 that it doesn't matter to serialize more. -AK */
131 spin_lock_irqsave(&gart_lock
, flags
);
133 for (i
= 0; i
< amd_nb_num(); i
++) {
134 pci_write_config_dword(node_to_amd_nb(i
)->misc
, 0x9c,
138 for (i
= 0; i
< amd_nb_num(); i
++) {
140 /* Make sure the hardware actually executed the flush*/
142 pci_read_config_dword(node_to_amd_nb(i
)->misc
,
149 spin_unlock_irqrestore(&gart_lock
, flags
);
151 printk("nothing to flush?\n");
153 EXPORT_SYMBOL_GPL(amd_flush_garts
);
155 static __init
int init_amd_nbs(void)
159 err
= amd_cache_northbridges();
162 printk(KERN_NOTICE
"AMD NB: Cannot enumerate AMD northbridges.\n");
164 if (amd_cache_gart() < 0)
165 printk(KERN_NOTICE
"AMD NB: Cannot initialize GART flush words, "
166 "GART support disabled.\n");
171 /* This has to go after the PCI subsystem */
172 fs_initcall(init_amd_nbs
);