drivers/acpi/processor_core.c
/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *	Alex Chiang <achiang@hp.com>
 *	- Unified x86/ia64 implementations
 *
 * I/O APIC hotplug support
 *	Yinghai Lu <yinghai@kernel.org>
 *	Jiang Liu <jiang.liu@intel.com>
 */
#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
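
/*
 * Return a cached pointer to the MADT.  The table lookup happens only on
 * the first call; later calls reuse the cached result, which may be NULL
 * if the lookup failed.
 */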
static struct acpi_table_madt *get_madt_table(void)
{
	static struct acpi_table_madt *madt;
	static int read_madt;

	if (!read_madt) {
		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
					(struct acpi_table_header **)&madt)))
			madt = NULL;
		read_madt++;
	}

	return madt;
}
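
/*
 * Match a Local APIC subtable against @acpi_id and, if the entry is
 * enabled and its processor id matches, return the APIC id through
 * @apic_id.
 */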
static int map_lapic_id(struct acpi_subtable_header *entry,
		u32 acpi_id, phys_cpuid_t *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		container_of(entry, struct acpi_madt_local_apic, header);

	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (lapic->processor_id != acpi_id)
		return -EINVAL;

	*apic_id = lapic->id;
	return 0;
}
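
/*
 * As map_lapic_id(), but for x2APIC subtables.  x2APIC entries are only
 * matched against Device-declared processors, so @device_declaration must
 * be set and @acpi_id is compared with the entry's UID.
 */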
static int map_x2apic_id(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
	struct acpi_madt_local_x2apic *apic =
		container_of(entry, struct acpi_madt_local_x2apic, header);

	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration && (apic->uid == acpi_id)) {
		*apic_id = apic->local_apic_id;
		return 0;
	}

	return -EINVAL;
}
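
/*
 * Match an ia64 Local SAPIC subtable.  Device declarations are matched by
 * UID (only for entries long enough to carry one), Processor declarations
 * by processor id; the physical id is composed from the id/eid pair.
 */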
static int map_lsapic_id(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		container_of(entry, struct acpi_madt_local_sapic, header);

	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration) {
		if ((entry->length < 16) || (lsapic->uid != acpi_id))
			return -EINVAL;
	} else if (lsapic->processor_id != acpi_id)
		return -EINVAL;

	*apic_id = (lsapic->id << 8) | lsapic->eid;
	return 0;
}
/*
 * Retrieve the ARM CPU physical identifier (MPIDR)
 */
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
{
	struct acpi_madt_generic_interrupt *gicc =
		container_of(entry, struct acpi_madt_generic_interrupt, header);

	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	/*
	 * device_declaration means Device object in DSDT.  In the GIC
	 * interrupt model, logical processors are required to have a
	 * Processor Device object in the DSDT, so we should check
	 * device_declaration here.
	 */
	if (device_declaration && (gicc->uid == acpi_id)) {
		*mpidr = gicc->arm_mpidr;
		return 0;
	}

	return -EINVAL;
}
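
/*
 * Walk every subtable in @madt and try the type-specific mappers above
 * until one reports a match; returns PHYS_CPUID_INVALID otherwise.
 */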
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
				   int type, u32 acpi_id)
{
	unsigned long madt_end, entry;
	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */

	if (!madt)
		return phys_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */
	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;

		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (!map_lapic_id(header, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
				break;
		}
		entry += header->length;
	}

	return phys_id;
}
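
/*
 * Early-boot helper: takes its own reference on the MADT, maps @acpi_id as
 * a Device-declared processor, and drops the table reference again.
 */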
phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
{
	struct acpi_table_madt *madt = NULL;
	phys_cpuid_t rv;

	acpi_get_table(ACPI_SIG_MADT, 0,
		       (struct acpi_table_header **)&madt);
	if (!madt)
		return PHYS_CPUID_INVALID;

	rv = map_madt_entry(madt, 1, acpi_id);

	acpi_put_table((struct acpi_table_header *)madt);

	return rv;
}
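
/*
 * Evaluate the processor's _MAT control method, which returns a buffer
 * holding a single MADT-style subtable, and dispatch on its type to the
 * mappers above.
 */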
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header))
		goto exit;

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
		map_lapic_id(header, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
		map_lsapic_id(header, type, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
		map_x2apic_id(header, type, acpi_id, &phys_id);
	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		map_gicc_mpidr(header, type, acpi_id, &phys_id);

exit:
	kfree(buffer.pointer);
	return phys_id;
}
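
/*
 * Resolve the physical CPU id (APIC id, SAPIC id or MPIDR) for @handle:
 * prefer the per-device _MAT method and fall back to scanning the MADT.
 */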
phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
	phys_cpuid_t phys_id;

	phys_id = map_mat_entry(handle, type, acpi_id);
	if (invalid_phys_cpuid(phys_id))
		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);

	return phys_id;
}
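
/*
 * Translate a physical CPU id into a logical CPU number, with the UP
 * special cases described in the comments below.
 */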
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif

	if (invalid_phys_cpuid(phys_id)) {
		/*
		 * On UP processors there is no _MAT or MADT table, so the
		 * phys_id above is always set to PHYS_CPUID_INVALID.
		 *
		 * The BIOS may define multiple CPU handles even for a UP
		 * processor.  For example,
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignore phys_id and always return 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.  This should
		 * be the case if SMP tables are not found.  Return -EINVAL
		 * for other CPUs' handles.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		else
			return -EINVAL;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == phys_id)
			return i;
	}
#else
	/* In a UP kernel, only processor 0 is valid. */
	if (phys_id == 0)
		return phys_id;
#endif
	return -ENODEV;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
	phys_cpuid_t phys_id;

	phys_id = acpi_get_phys_id(handle, type, acpi_id);

	return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
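
/*
 * The helpers below resolve an I/O APIC's id and MMIO address from its GSI
 * base and are only built when I/O APIC hotplug is enabled.
 */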
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
			 u64 *phys_addr, int *ioapic_id)
{
	struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;

	if (ioapic->global_irq_base != gsi_base)
		return 0;

	*phys_addr = ioapic->address;
	*ioapic_id = ioapic->id;
	return 1;
}
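
/*
 * Scan the MADT for an I/O APIC subtable whose GSI base matches @gsi_base;
 * on success return its id and store the MMIO address in @phys_addr,
 * otherwise return -1.
 */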
static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
{
	struct acpi_subtable_header *hdr;
	unsigned long madt_end, entry;
	struct acpi_table_madt *madt;
	int apic_id = -1;

	madt = get_madt_table();
	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */
	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		hdr = (struct acpi_subtable_header *)entry;
		if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
		    get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
			break;
		else
			entry += hdr->length;
	}

	return apic_id;
}
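
/*
 * Same lookup, but against the single subtable returned by the device's
 * _MAT method rather than the static MADT.
 */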
static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
				  u64 *phys_addr)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_subtable_header *header;
	union acpi_object *obj;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header))
		goto exit;

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_IO_APIC)
		get_ioapic_id(header, gsi_base, phys_addr, &apic_id);

exit:
	kfree(buffer.pointer);
	return apic_id;
}
/**
 * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
 * @handle:	ACPI object for IOAPIC device
 * @gsi_base:	GSI base to match with
 * @phys_addr:	Pointer to store physical address of matching IOAPIC record
 *
 * Walk the resources returned by the ACPI _MAT method, then the ACPI MADT
 * table, to search for an IOAPIC record matching @gsi_base.
 * Return the IOAPIC id and store the physical address in @phys_addr if a
 * match is found, otherwise return <0.
 */
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
{
	int apic_id;

	apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
	if (apic_id == -1)
		apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);

	return apic_id;
}
#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */