/* arch/x86/kernel/apic/x2apic_cluster.c */

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

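/*
 * Per-cpu cache of each cpu's hardware-assigned logical APIC ID. It is
 * filled in by init_x2apic_ldr() at cpu bringup and consumed by the IPI
 * and irq-affinity paths below.
 */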
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/*
 * We need to use more than cpu 0, because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpu_online_mask;
}

/*
 * For now, each logical cpu is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

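/*
 * Note: with one cpu per domain, a given vector is programmed on exactly
 * one cpu, so the lowest-priority delivery mode declared in the driver
 * structure below effectively targets a single cpu.
 */
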
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI.
	 */
	native_x2apic_icr_write(cfg, apicid);
}

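/*
 * Note: in x2apic mode the ICR is a single 64-bit MSR with the destination
 * in bits 63:32, so the one wrmsr above both programs and triggers the IPI;
 * unlike xAPIC mode, there is no delivery-status bit to poll first.
 */
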
/*
 * For now, we send the IPIs one by one over the cpumask.
 * TBD: based on the cpu mask, we could send the IPIs to a whole cluster
 * group at once. We have 16 cpus in a cluster, so this would minimize
 * IPI register writes (see the sketch after this function).
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/* the ICR wrmsr is not serializing; fence so prior stores are visible */
	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

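/*
 * A minimal sketch of that batching, assuming a hypothetical cluster_mask
 * holding the subset of 'mask' that belongs to one cluster (no such helper
 * exists in this file). Logical IDs within a cluster share the upper 16
 * cluster bits, so OR-ing them yields one destination covering them all:
 *
 *	u32 dest = 0;
 *	for_each_cpu(cpu, cluster_mask)
 *		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
 *	__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
 */
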
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_online_cpu(query_cpu) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);

	return BAD_APICID;
}

static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

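/*
 * Note: x2apic APIC IDs are full 32-bit values read straight from the
 * APIC registers, so the two conversions above are identity functions and
 * .apic_id_mask in the driver structure below is 0xFFFFFFFFu.
 */
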
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

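/*
 * Note: x2apic mode adds a dedicated SELF IPI register (MSR 0x83f), so the
 * self-IPI above is a single register write with no ICR composition at all.
 */
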
static void init_x2apic_ldr(void)
{
	int cpu = smp_processor_id();

	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}

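/*
 * Note: in cluster mode the LDR is read-only; hardware derives it from the
 * x2apic ID as (id >> 4) << 16 | 1 << (id & 0xf), i.e. a 16-bit cluster
 * number plus a 16-cpu position bitmap. init_x2apic_ldr() therefore only
 * reads and caches it, never writes it.
 */
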
struct apic apic_x2apic_cluster = {

	.name = "cluster x2apic",
	.probe = NULL,
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.apic_id_registered = x2apic_apic_id_registered,

	.irq_delivery_mode = dest_LowestPrio,
	.irq_dest_mode = 1, /* logical */

	.target_cpus = x2apic_target_cpus,
	.disable_esr = 0,
	.dest_logical = APIC_DEST_LOGICAL,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = x2apic_vector_allocation_domain,
	.init_apic_ldr = init_x2apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = x2apic_cluster_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = x2apic_cluster_phys_get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = 0xFFFFFFFFu,

	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_self = x2apic_send_IPI_self,

	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = native_x2apic_icr_read,
	.icr_write = native_x2apic_icr_write,
	.wait_icr_idle = native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};