#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS
extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
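
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * scanning candidate zones skips any zone whose node the current cpuset
 * forbids.  The "number_of_cpusets <= 1" test in the wrappers above
 * keeps this check nearly free on systems that never create cpusets.
 * try_alloc_from_zone() below is a hypothetical helper, shown only to
 * frame where the check sits:
 *
 *	for_each_zone(zone) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		page = try_alloc_from_zone(zone, order, gfp_mask);
 *		if (page)
 *			break;
 *	}
 */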
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
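
/*
 * Usage sketch: the bump belongs in the synchronous reclaim path, so a
 * cpuset's memory_pressure counter only moves when one of its tasks is
 * actually pushed into reclaim.  A call site would look roughly like
 * the following, where enter_reclaim() is a stand-in for the real
 * reclaim entry point (an assumption for illustration):
 *
 *	cpuset_memory_pressure_bump();
 *	progress = enter_reclaim(zonelist, gfp_mask);
 *
 * Because the enabled check lives in the macro itself, the disabled
 * case costs one global load and a branch at each call site.
 */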
extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
 * Reading current mems_allowed and mempolicy in the fastpath must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after the
	 * update of ->mems_allowed_change_disable.
	 *
	 * The write-side task finds ->mems_allowed_change_disable is not 0,
	 * and knows the read-side task is reading mems_allowed or mempolicy,
	 * so it will clear old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that reading mems_allowed and mempolicy happens before
	 * reducing mems_allowed_change_disable.
	 *
	 * The write-side task will then know that the read-side task is
	 * still reading mems_allowed or mempolicy, and will not clear old
	 * bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
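
/*
 * Read-side usage sketch: pin the mask around any fastpath use of
 * current->mems_allowed so a concurrent cpuset update keeps the old
 * bits visible until the reader is done.  For example, an allocation
 * path might do:
 *
 *	get_mems_allowed();
 *	nid = next_node(nid, cpuset_current_mems_allowed);
 *	page = alloc_pages_exact_node(nid, gfp_mask, order);
 *	put_mems_allowed();
 *
 * The allocation call here is only a stand-in for whatever work the
 * caller performs while the mask is pinned.
 */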
static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}
#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	do_set_cpus_allowed(p, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */