/*
 * NOTE(review): the following three lines are scraping artifacts from a
 * gitweb page view, preserved here as a comment; the commit title is
 * unrelated to this file.
 *   spi-topcliff-pch: add recovery processing in case wait-event timeout
 *   [zen-stable.git] / arch / x86 / include / asm / topology.h
 *   blob b9676ae37ada7ddbf7c7d208653ecb892ca2a9c8
 */
/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
25 #ifndef _ASM_X86_TOPOLOGY_H
26 #define _ASM_X86_TOPOLOGY_H
28 #ifdef CONFIG_X86_32
29 # ifdef CONFIG_X86_HT
30 # define ENABLE_TOPO_DEFINES
31 # endif
32 #else
33 # ifdef CONFIG_SMP
34 # define ENABLE_TOPO_DEFINES
35 # endif
36 #endif
39 * to preserve the visibility of NUMA_NO_NODE definition,
40 * moved to there from here. May be used independent of
41 * CONFIG_NUMA.
43 #include <linux/numa.h>
45 #ifdef CONFIG_NUMA
46 #include <linux/cpumask.h>
48 #include <asm/mpspec.h>
50 /* Mappings between logical cpu number and node number */
51 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
53 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
55 * override generic percpu implementation of cpu_to_node
57 extern int __cpu_to_node(int cpu);
58 #define cpu_to_node __cpu_to_node
60 extern int early_cpu_to_node(int cpu);
62 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
64 /* Same function but used if called before per_cpu areas are setup */
65 static inline int early_cpu_to_node(int cpu)
67 return early_per_cpu(x86_cpu_to_node_map, cpu);
70 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
72 /* Mappings between node number and cpus on that node. */
73 extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
75 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
76 extern const struct cpumask *cpumask_of_node(int node);
77 #else
78 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
79 static inline const struct cpumask *cpumask_of_node(int node)
81 return node_to_cpumask_map[node];
83 #endif
85 extern void setup_node_to_cpumask_map(void);
/*
 * Returns the number of the node containing Node 'node'.  This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)
/* sched_domain tuning differs between 32-bit and 64-bit builds */
#ifdef CONFIG_X86_32
# define SD_CACHE_NICE_TRIES	1
# define SD_IDLE_IDX		1
#else
# define SD_CACHE_NICE_TRIES	2
# define SD_IDLE_IDX		2
#endif
/* sched_domains SD_NODE_INIT for NUMA machines */
#define SD_NODE_INIT (struct sched_domain) {				\
	.min_interval		= 8,					\
	.max_interval		= 32,					\
	.busy_factor		= 32,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= SD_CACHE_NICE_TRIES,			\
	.busy_idx		= 3,					\
	.idle_idx		= SD_IDLE_IDX,				\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
136 #else /* !CONFIG_NUMA */
138 static inline int numa_node_id(void)
140 return 0;
143 * indicate override:
145 #define numa_node_id numa_node_id
147 static inline int early_cpu_to_node(int cpu)
149 return 0;
152 static inline void setup_node_to_cpumask_map(void) { }
154 #endif
156 #include <asm-generic/topology.h>
158 extern const struct cpumask *cpu_coregroup_mask(int cpu);
#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers		yes
#endif
170 static inline void arch_fix_phys_package_id(int num, u32 slot)
struct pci_bus;
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
#ifdef CONFIG_SMP
#define mc_capable()	((boot_cpu_data.x86_max_cores > 1) &&	\
			(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
#define smt_capable()			(smp_num_siblings > 1)
#endif
#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
#else
/* Non-NUMA: every bus maps to node 0; setting the mapping is a no-op. */
static inline int get_mp_bus_to_node(int busnum)
{
	return 0;
}
static inline void set_mp_bus_to_node(int busnum, int node)
{
}
#endif
#endif /* _ASM_X86_TOPOLOGY_H */