Blackfin arch: merge adeos blackfin part to arch/blackfin/
[linux/fpc-iii.git] / arch / x86 / include / asm / summit / apic.h
blob4bb5fb34f030256fe4eb6ed875defea34c89bf32
#ifndef __ASM_SUMMIT_APIC_H
#define __ASM_SUMMIT_APIC_H

#include <asm/smp.h>

/* Summit sub-arch: local APIC ESR handling is disabled and the generic
 * IRQ-balancing "no balance" flag is off. */
#define esr_disable (1)
#define NO_BALANCE_IRQ (0)

/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

/* Destination Format Register value: clustered logical addressing. */
#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
/* Default destination set for IRQs on this sub-architecture. */
static inline const cpumask_t *target_cpus(void)
{
	/* CPU_MASK_ALL (0xff) has undefined behaviour with
	 * dest_LowestPrio mode logical clustered apic interrupt routing
	 * Just start on cpu 0.  IRQ balancing will spread load
	 */
	return &cpumask_of_cpu(0);
}
/* Interrupt delivery: lowest-priority arbitration, logical destination mode. */
#define INT_DELIVERY_MODE (dest_LowestPrio)
#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
/* Never reports an APIC ID as already in use; both arguments are ignored. */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* we don't use the phys_cpu_present_map to indicate apicid presence */
static inline unsigned long check_apicid_present(int bit)
{
	/* Unconditionally "present"; the bit argument is ignored. */
	return 1;
}
/* Extract the cluster number (high nibble) from a clustered-mode APIC ID. */
#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)

/* Per-CPU table mapping cpu number -> logical APIC ID; defined elsewhere. */
extern u8 cpu_2_logical_apicid[];
/*
 * Program this CPU's Logical Destination Register for clustered mode:
 * logical ID = own cluster number | one bit chosen by counting CPUs
 * already registered in the same cluster.  Must run on the CPU being
 * initialized (reads hard_smp_processor_id()); register write order
 * (DFR before LDR) is deliberate.
 */
static inline void init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = (u8)apicid_cluster(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);
	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
/* No multi-timer quirk handling on this sub-arch; always reports "no". */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
/* The APIC ID is always considered registered on Summit. */
static inline int apic_id_registered(void)
{
	return 1;
}
81 static inline void setup_apic_routing(void)
83 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
84 nr_ioapics);
/* Map an APIC ID to a NUMA node (always node 0 on UP builds).
 * NOTE(review): on SMP this indexes apicid_2_node[] by the *calling* CPU's
 * hardware ID and ignores the logical_apicid argument entirely — looks
 * suspicious; confirm against the other sub-arch implementations. */
static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range cpu numbers map to BAD_APICID. */
	return (cpu < nr_cpu_ids) ? (int)cpu_2_logical_apicid[cpu]
				  : BAD_APICID;
#else
	return logical_smp_processor_id();
#endif
}
108 static inline int cpu_present_to_apicid(int mps_cpu)
110 if (mps_cpu < nr_cpu_ids)
111 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
112 else
113 return BAD_APICID;
/* Build the physical-ID map the I/O APIC setup code uses. */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0x0F);
}
/* Present-map contribution for an APIC ID; always reports physid 0 only. */
static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
	return physid_mask_of_physid(0);
}
/* No-op on this sub-architecture. */
static inline void setup_portio_remap(void)
{
}
/* Every physical APIC ID is treated as present; the argument is ignored. */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
/* No-op on this sub-architecture. */
static inline void enable_apic_mode(void)
{
}
140 static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
142 int num_bits_set;
143 int cpus_found = 0;
144 int cpu;
145 int apicid;
147 num_bits_set = cpus_weight(*cpumask);
148 /* Return id to all */
149 if (num_bits_set >= nr_cpu_ids)
150 return (int) 0xFF;
152 * The cpus in the mask must all be on the apic cluster. If are not
153 * on the same apicid cluster return default value of TARGET_CPUS.
155 cpu = first_cpu(*cpumask);
156 apicid = cpu_to_logical_apicid(cpu);
157 while (cpus_found < num_bits_set) {
158 if (cpu_isset(cpu, *cpumask)) {
159 int new_apicid = cpu_to_logical_apicid(cpu);
160 if (apicid_cluster(apicid) !=
161 apicid_cluster(new_apicid)){
162 printk ("%s: Not a valid mask!\n", __func__);
163 return 0xFF;
165 apicid = apicid | new_apicid;
166 cpus_found++;
168 cpu++;
170 return apicid;
173 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
174 const struct cpumask *andmask)
176 int apicid = cpu_to_logical_apicid(0);
177 cpumask_var_t cpumask;
179 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
180 return apicid;
182 cpumask_and(cpumask, inmask, andmask);
183 cpumask_and(cpumask, cpumask, cpu_online_mask);
184 apicid = cpu_mask_to_apicid(cpumask);
186 free_cpumask_var(cpumask);
187 return apicid;
/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	/* NOTE(review): cpuid_apic is deliberately unused — see above. */
	return hard_smp_processor_id() >> index_msb;
}

#endif /* __ASM_SUMMIT_APIC_H */