/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>
#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))
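
/*
 * Illustrative use only (not part of the original header): walk every
 * online node that has at least one CPU, for example to report per-node
 * CPU counts during setup:
 *
 *	int node;
 *
 *	for_each_node_with_cpus(node)
 *		pr_info("node %d: %u cpus\n", node, nr_cpus_node(node));
 */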

int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * then switch on zone reclaim on boot.
 */
#define RECLAIM_DISTANCE 30
#endif
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif
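
/*
 * Illustrative sketch only (not part of the original header): a NUMA
 * architecture would typically override node_distance() from its
 * <asm/topology.h> with a lookup into the firmware-provided SLIT table,
 * along these (hypothetical) lines:
 *
 *	extern int __node_distance(int from, int to);
 *	#define node_distance(from, to)	__node_distance((from), (to))
 *
 * Zone reclaim is then switched on at boot for node pairs whose reported
 * distance exceeds RECLAIM_DISTANCE.
 */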

/*
 * Below are the 3 major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_CPU_INIT, for SMP domains
 * SD_NODE_INIT, for NUMA domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining their own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified.)
 */
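
/*
 * Illustrative sketch only (not part of the original header): an
 * architecture's <asm/topology.h> could provide, for example, its own
 * NUMA-level initializer (the field values here are made up purely for
 * illustration):
 *
 *	#define SD_NODE_INIT (struct sched_domain) {		\
 *		.min_interval		= 8,			\
 *		.max_interval		= 32,			\
 *		.imbalance_pct		= 125,			\
 *		.flags			= SD_LOAD_BALANCE	\
 *					| SD_BALANCE_EXEC	\
 *					| SD_BALANCE_FORK	\
 *					| SD_SERIALIZE,		\
 *		.last_balance		= jiffies,		\
 *		.balance_interval	= 64,			\
 *	}
 *
 * Because the default initializers below are wrapped in #ifndef, an
 * arch-provided definition takes precedence; SD_NODE_INIT itself must
 * always come from the architecture (see the CONFIG_NUMA check below).
 */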

#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) {			\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_PREFER_SIBLING			\
				| arch_sd_sibling_asym_packing()	\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
}
#endif
#endif /* CONFIG_SCHED_SMT */

#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings. For now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
#define SD_MC_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| sd_balance_for_mc_power()		\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif
#endif /* CONFIG_SCHED_MC */

/* Common values for CPUs */
#ifndef SD_CPU_INIT
#define SD_CPU_INIT (struct sched_domain) {				\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| sd_balance_for_package_power()	\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif

/* sched_domains SD_ALLNODES_INIT for NUMA machines */
#define SD_ALLNODES_INIT (struct sched_domain) {			\
	.min_interval		= 64,					\
	.max_interval		= 64*num_online_cpus(),			\
	.busy_factor		= 128,					\
	.imbalance_pct		= 133,					\
	.cache_nice_tries	= 1,					\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 0*SD_BALANCE_EXEC			\
				| 0*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 64,					\
}

#ifndef SD_NODES_PER_DOMAIN
#define SD_NODES_PER_DOMAIN 16
#endif

#ifdef CONFIG_SCHED_BOOK
#ifndef SD_BOOK_INIT
#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
#endif
#endif /* CONFIG_SCHED_BOOK */

#ifdef CONFIG_NUMA
#ifndef SD_NODE_INIT
#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
#endif
#endif /* CONFIG_NUMA */

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return __this_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	percpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
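
/*
 * Illustrative use only (not part of the original header): callers are
 * expected to go through these accessors rather than the per-cpu variable,
 * typically to allocate memory on the caller's local node, e.g.:
 *
 *	void *buf = kmalloc_node(size, GFP_KERNEL, numa_node_id());
 */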

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
	percpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return __this_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */
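
/*
 * Illustrative use only (not part of the original header): on a system
 * with memoryless nodes, "local" allocations should target the nearest
 * node that actually has memory, e.g.:
 *
 *	struct page *page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);
 *
 * rather than passing numa_node_id(), which may name a memoryless node.
 */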

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif
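
/*
 * Illustrative use only (not part of the original header): code that wants
 * to know whether a CPU shares its physical core with SMT siblings can
 * test the size of its thread mask, e.g.:
 *
 *	bool has_smt_sibling =
 *		cpumask_weight(topology_thread_cpumask(cpu)) > 1;
 */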

#endif /* _LINUX_TOPOLOGY_H */