1 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */
14 #include <acpi/pdc_intel.h>
16 #include <linux/init.h>
17 #include <linux/numa.h>
21 extern int acpi_lapic
;
22 #define acpi_disabled 0 /* ACPI always enabled on IA64 */
23 #define acpi_noirq 0 /* ACPI always enabled on IA64 */
24 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
25 #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
27 static inline bool acpi_has_cpu_in_madt(void)
32 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
/* No-op: ACPI is always enabled on IA64 (see acpi_disabled above), so
 * there is nothing to disable. */
static inline void disable_acpi(void) { }
35 int acpi_request_vector (u32 int_type
);
36 int acpi_gsi_to_irq (u32 gsi
, unsigned int *irq
);
38 /* Low-level suspend routine. */
39 extern int acpi_suspend_lowlevel(void);
/*
 * Physical address of the S3 wakeup vector.
 * NOTE(review): body reconstructed — IA64 has no real-mode wakeup
 * trampoline, so the dropped original returned 0.
 */
static inline unsigned long acpi_get_wakeup_address(void)
{
	return 0;
}
47 * Record the cpei override flag and current logical cpu. This is
48 * useful for CPU removal.
50 extern unsigned int can_cpei_retarget(void);
51 extern unsigned int is_cpu_cpei_target(unsigned int cpu
);
52 extern void set_cpei_target_cpu(unsigned int cpu
);
53 extern unsigned int get_cpei_target_cpu(void);
54 extern void prefill_possible_map(void);
55 #ifdef CONFIG_ACPI_HOTPLUG_CPU
56 extern int additional_cpus
;
58 #define additional_cpus 0
61 #ifdef CONFIG_ACPI_NUMA
62 #if MAX_NUMNODES > 256
63 #define MAX_PXM_DOMAINS MAX_NUMNODES
65 #define MAX_PXM_DOMAINS (256)
67 extern int pxm_to_nid_map
[MAX_PXM_DOMAINS
];
68 extern int __initdata nid_to_pxm_map
[MAX_NUMNODES
];
/* IA64 always evaluates _PDC for processor capability negotiation. */
static inline bool arch_has_acpi_pdc(void)
{
	return true;
}
72 static inline void arch_acpi_set_pdc_bits(u32
*buf
)
74 buf
[2] |= ACPI_PDC_EST_CAPABILITY_SMP
;
77 #define acpi_unlazy_tlb(x)
79 #ifdef CONFIG_ACPI_NUMA
80 extern cpumask_t early_cpu_possible_map
;
81 #define for_each_possible_early_cpu(cpu) \
82 for_each_cpu((cpu), &early_cpu_possible_map)
84 static inline void per_cpu_scan_finalize(int min_cpus
, int reserve_cpus
)
86 int low_cpu
, high_cpu
;
90 low_cpu
= cpumask_weight(&early_cpu_possible_map
);
92 high_cpu
= max(low_cpu
, min_cpus
);
93 high_cpu
= min(high_cpu
+ reserve_cpus
, NR_CPUS
);
95 for (cpu
= low_cpu
; cpu
< high_cpu
; cpu
++) {
96 cpumask_set_cpu(cpu
, &early_cpu_possible_map
);
97 if (node_cpuid
[cpu
].nid
== NUMA_NO_NODE
) {
98 node_cpuid
[cpu
].nid
= next_nid
;
100 if (next_nid
>= num_online_nodes())
106 extern void acpi_numa_fixup(void);
108 #endif /* CONFIG_ACPI_NUMA */
110 #endif /*__KERNEL__*/
112 #endif /*_ASM_ACPI_H*/