#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS
/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}
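/*
 * Illustrative sketch (not part of the original header) of the kind of
 * reader loop the ordering comment above protects; the loop body is a
 * hypothetical placeholder for any code that consults mems_allowed:
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		// walk current->mems_allowed, possibly with irqs disabled
 *	} while (read_mems_allowed_retry(seq));
 *
 * If cpusets_enabled_key (used by retry()) were patched before
 * cpusets_pre_enable_key (used by begin()), begin() would still return 0
 * while retry() compared the live seqcount against it, so the loop could
 * spin forever with interrupts off. Hence cpuset_inc() above flips the
 * pre-enable key first and cpuset_dec() flips it last.
 */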
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
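/*
 * Typical usage sketch (illustrative only, not part of the original header).
 * try_operation() is a hypothetical stand-in for whatever work depends on a
 * stable view of current->mems_allowed, e.g. a page allocation attempt:
 *
 *	unsigned int cpuset_mems_cookie;
 *	bool ok;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		ok = try_operation();
 *	} while (!ok && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * A failure observed while mems_allowed was being rewritten concurrently is
 * treated as artificial and simply retried instead of being reported to the
 * caller.
 */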
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */