/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

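/*
 * Cached per-northbridge copy of the GART flush-control dword (offset
 * 0x9c of the misc/function-3 device).  amd_cache_gart() fills it once
 * at init; amd_flush_garts() writes it back with the flush bit set.
 */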
static u32 *flush_words;

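/*
 * PCI IDs of the northbridge "misc" (function 3) devices for each
 * supported family; amd_nb_link_ids below lists the matching "link"
 * (function 4) devices.  early_is_amd_nb() walks this table, so it
 * must stay zero-terminated.
 */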
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

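/*
 * Walk the global PCI device list for the next device matching @ids.
 * pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev) drops the reference on
 * the device passed in and takes one on the device returned, so the
 * result can simply be fed back in to resume the scan.
 */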
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* GART is present on K8 and Fam10h, and on Fam15h only up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

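/*
 * Typical use by dependent code (an illustrative sketch, not part of
 * this file):
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *	for (i = 0; i < amd_nb_num(); i++)
 *		pci_read_config_dword(node_to_amd_nb(i)->misc, reg, &val);
 *
 * where reg and val are the caller's own register offset and buffer.
 */
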
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;	/* caller passes the raw config dword: device ID lives in the upper 16 bits */
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all CPUs from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end   = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

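/*
 * Each bus decodes 2^20 bytes of MMCONFIG space (32 devices x 8
 * functions x 4 KiB of config space), so a window covering
 * 2^segn_busn_bits buses spans 2^(segn_busn_bits + 20) bytes --
 * hence the "+ 20" in the end calculation above.
 */
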
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

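/*
 * Layout implied by the accessors above and in amd_set_subcaches():
 * the dword at offset 0x1d4 of the link (function 4) device carries a
 * 4-bit subcache-enable field per compute unit, with compute unit N
 * occupying bits 4N+3:4N -- hence the shift by 4 * cuid and the 0xf
 * mask.
 */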
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

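/*
 * GART TLB flush handshake assumed by amd_flush_garts() below: writing
 * the cached flush-words value back to offset 0x9c with bit 0 set
 * requests an invalidation, and the hardware clears bit 0 once the
 * flush has completed, so the second loop polls until that bit reads
 * back as zero.
 */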
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();
	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);