Linux 4.1.18
arch/x86/kernel/amd_nb.c

/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
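
/* GART flush words, one per northbridge, cached by amd_cache_gart() */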
static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
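
/*
 * Iterate over the PCI device list: return the next device after @dev
 * matching one of @ids, or NULL when none is left.
 */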
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
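
/*
 * Enumerate all northbridges, cache their misc/link PCI devices in
 * amd_northbridges and set the feature flags (GART, L3 index disable,
 * L3 partitioning) that apply to the current CPU family.
 */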
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* GART present on Fam 0xf and 0x10, and on Fam15h only up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}
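
/*
 * Fill @res with the MMCONFIG aperture described by the Fam10h
 * MSR_FAM10H_MMIO_CONF_BASE MSR. Returns NULL on non-AMD or pre-Fam10h
 * CPUs, or when MMCONFIG is disabled.
 */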
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
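
/*
 * Return the mask of enabled L3 subcaches (4 bits per compute unit)
 * for @cpu's compute unit, read from the node's link device at 0x1d4.
 */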
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}
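
/*
 * Enable exactly the L3 subcaches in @mask (at most 4 bits) for @cpu's
 * compute unit. BAN mode is switched off while any subcache is disabled
 * and restored once partitioning is back at its reset state.
 */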
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
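
/* Cache the GART flush word of every northbridge for amd_flush_garts() */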
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}
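
/* Kick off a GART TLB flush on all northbridges and wait for completion */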
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyway
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
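
/* Enumerate the northbridges and set up GART flushing at boot time */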
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);