#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
extern cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);

extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
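
/*
 * The inline wrappers below avoid the full out-of-line check in the
 * common case: when at most one cpuset exists (only the root), every
 * zone is allowed, so they return true immediately.  "Hardwall"
 * confines allocation to the task's own cpuset; "softwall" may also
 * permit memory from an enclosing ancestor cpuset for allocations
 * that do not pass __GFP_HARDWALL (see kernel/cpuset.c for the exact
 * rules).
 */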

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_softwall(z, gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_hardwall(z, gfp_mask);
}
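
/*
 * Typical use (a sketch, not verbatim kernel code): a zone scan in the
 * page allocator can skip zones the current task's cpuset does not
 * permit before trying to allocate from them:
 *
 *	if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *		continue;
 */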

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);
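
/*
 * Collect per-cpuset memory pressure statistics only when user space
 * has enabled them via cpuset_memory_pressure_enabled; the disabled
 * case costs just one global flag test on the reclaim path.
 */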

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

extern int cpuset_mem_spread_node(void);
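
/*
 * The helpers below test the per-task PF_SPREAD_PAGE / PF_SPREAD_SLAB
 * flags, which request that page cache and slab allocations be spread
 * evenly over the memory nodes allowed to the task's cpuset rather
 * than preferring the local node.
 */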

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern void cpuset_track_online_nodes(void);

extern int current_cpuset_is_being_rebound(void);

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p)
{
	return cpu_possible_map;
}

static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
{
	return cpu_possible_map;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline void cpuset_track_online_nodes(void) {}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */