/*
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *  Portions Copyright (c) 2004 Silicon Graphics, Inc.
 *
 *  2003-10-10 Written by Simon Derr <simon.derr@bull.net>
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson <pj@sgi.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#define CPUSET_SUPER_MAGIC		0x27e0eb

/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;
/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};
struct cpuset {
	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
	 */
	atomic_t count;			/* count tasks using this cpuset */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cpuset *parent;		/* my parent */
	struct dentry *dentry;		/* cpuset fs entry */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;

	struct fmeter fmeter;		/* memory_pressure filter */
};
/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEMORY_MIGRATE,
	CS_REMOVED,
	CS_NOTIFY_ON_RELEASE,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
	return !!test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
	return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}
/*
 * Increment this atomic integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 */
static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);
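
/*
 * Illustrative sketch of the consumer-side check (the real code is
 * cpuset_update_task_memory_state(), below): a task compares its
 * cached generation with its cpuset's before trusting its cached
 * mems_allowed, roughly:
 *
 *	if (tsk->cpuset_mems_generation != tsk->cpuset->mems_generation)
 *		refresh tsk->mems_allowed under callback_mutex;
 */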
static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
	.count = ATOMIC_INIT(0),
	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
	.children = LIST_HEAD_INIT(top_cpuset.children),
};
static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb;
/*
 * We have two global cpuset mutexes below.  They can nest.
 * It is ok to first take manage_mutex, then nest callback_mutex.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task
 * holds manage_mutex, then it blocks others wanting that mutex,
 * ensuring that it is the only task able to also acquire callback_mutex
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_mutex.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_mutex to query cpusets.  Once it is ready to make
 * the changes, it takes callback_mutex, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_mutex or callback_mutex can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both mutexes, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, therefore there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_mutex or callback_mutex can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those mutexes.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_mutex across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_mutex
 * is taken, and if the cpuset count is zero, a usermode call made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Semaphores" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both mutexes, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global mutex.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */

static DEFINE_MUTEX(manage_mutex);
static DEFINE_MUTEX(callback_mutex);
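
/*
 * Illustrative sketch of the nesting described above: a cpuset
 * modifier takes manage_mutex for the whole operation, and nests
 * callback_mutex just around the stores that query paths read:
 *
 *	mutex_lock(&manage_mutex);
 *	... validate, and allocate memory if needed (may sleep) ...
 *	mutex_lock(&callback_mutex);
 *	... publish the change ...
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&manage_mutex);
 */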
/*
 * A couple of forward declarations required, due to cyclic reference loop:
 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
static struct backing_dev_info cpuset_backing_dev_info = {
	.ra_pages = 0,		/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};
static struct inode *cpuset_new_inode(mode_t mode)
{
	struct inode *inode = new_inode(cpuset_sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
	}
	return inode;
}
static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cpuset */
	if (S_ISDIR(inode->i_mode)) {
		struct cpuset *cs = dentry->d_fsdata;
		BUG_ON(!(is_removed(cs)));
		kfree(cs);
	}
	iput(inode);
}

static struct dentry_operations cpuset_dops = {
	.d_iput = cpuset_diput,
};
static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
	struct dentry *d = lookup_one_len(name, parent, strlen(name));

	if (!IS_ERR(d))
		d->d_op = &cpuset_dops;
	return d;
}
static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}
/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
	struct list_head *node;

	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
		list_del_init(node);
		if (d->d_inode) {
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}
static struct super_operations cpuset_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
};
static int cpuset_fill_super(struct super_block *sb, void *unused_data,
							int unused_silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CPUSET_SUPER_MAGIC;
	sb->s_op = &cpuset_ops;
	cpuset_sb = sb;

	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
	if (inode) {
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directories start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else {
		return -ENOMEM;
	}

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = root;
	return 0;
}
static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
					int flags, const char *unused_dev_name,
					void *data)
{
	return get_sb_single(fs_type, flags, data, cpuset_fill_super);
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
	.kill_sb = kill_litter_super,
};
/* struct cftype:
 *
 * The files in the cpuset filesystem mostly have a very simple read/write
 * handling, some common function will take care of it. Nevertheless some cases
 * (read tasks) are special and therefore I define this structure for every
 * kind of file.
 *
 * When reading/writing to a file:
 *	- the cpuset to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

struct cftype {
	char *name;
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*release) (struct inode *inode, struct file *file);
};
static inline struct cpuset *__d_cs(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}
/*
 * Call with manage_mutex held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
	char *start;

	start = buf + buflen;

	--start;
	*start = '\0';
	for (;;) {
		int len = cs->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cs->dentry->d_name.name, len);
		cs = cs->parent;
		if (!cs)
			break;
		if (!cs->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
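
/*
 * Illustrative example: for a cpuset filesystem mounted at
 * /dev/cpuset, the cpuset created by "mkdir /dev/cpuset/big/sub"
 * yields the string "/big/sub" in buf - its path relative to the
 * cpuset filesystem root, as later handed to the release agent.
 */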
/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * When we had only one cpuset mutex, we had to call this
 * without holding it, to avoid deadlock when call_usermodehelper()
 * allocated memory.  With two locks, we could now call this while
 * holding manage_mutex, but we still don't, so as to minimize
 * the time manage_mutex is held.
 */

static void cpuset_release_agent(const char *pathbuf)
{
	char *argv[3], *envp[3];
	int i;

	if (!pathbuf)
		return;

	i = 0;
	argv[i++] = "/sbin/cpuset_release_agent";
	argv[i++] = (char *)pathbuf;
	argv[i] = NULL;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	call_usermodehelper(argv[0], argv, envp, 0);
	kfree(pathbuf);
}
/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once manage_mutex is dropped.
 * Call here with manage_mutex held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with manage_mutex held and the address
 * of the pathbuf pointer, then dropping manage_mutex, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
	    list_empty(&cs->children)) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;
		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
			kfree(buf);
		else
			*ppathbuf = buf;
	}
}
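
/*
 * Caller-side sketch of the pathbuf protocol described above
 * (illustrative only):
 *
 *	char *pathbuf = NULL;
 *
 *	mutex_lock(&manage_mutex);
 *	...
 *	check_for_release(cs, &pathbuf);
 *	mutex_unlock(&manage_mutex);
 *	cpuset_release_agent(pathbuf);	(kfree's pathbuf, if any)
 */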
/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}
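
/*
 * Illustrative example: if a cpuset allows only cpus 4-7 and all four
 * are taken offline, the walk above falls back to the nearest ancestor
 * that still intersects cpu_online_map, and ultimately to
 * cpu_online_map itself, so callers never see an empty mask.
 */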
/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed, node_online_map);
	else
		*pmask = node_online_map;
	BUG_ON(!nodes_intersects(*pmask, node_online_map));
}
/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current task's cpuset's mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_mutex or task_lock() held.  May be called
 * with or without manage_mutex held.  Doesn't need task_lock to guard
 * against another task changing a non-NULL cpuset pointer to NULL,
 * as that is only done by a task on itself, and if the current task
 * is here, it is not simultaneously in the exit code NULL'ing its
 * cpuset pointer.  This routine also might acquire callback_mutex and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset dereference, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * using RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arch's, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

void cpuset_update_task_memory_state(void)
{
	int my_cpusets_mem_gen;
	struct task_struct *tsk = current;
	struct cpuset *cs;

	if (tsk->cpuset == &top_cpuset) {
		/* Don't need rcu for top_cpuset.  It's never freed. */
		my_cpusets_mem_gen = top_cpuset.mems_generation;
	} else {
		rcu_read_lock();
		cs = rcu_dereference(tsk->cpuset);
		my_cpusets_mem_gen = cs->mems_generation;
		rcu_read_unlock();
	}

	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
		mutex_lock(&callback_mutex);
		task_lock(tsk);
		cs = tsk->cpuset;	/* Maybe changed when task not locked */
		guarantee_online_mems(cs, &tsk->mems_allowed);
		tsk->cpuset_mems_generation = cs->mems_generation;
		task_unlock(tsk);
		mutex_unlock(&callback_mutex);
		mpol_rebind_task(tsk, &tsk->mems_allowed);
	}
}
/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}
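
/*
 * Illustrative example: a child allowing cpus 0-1 inside a parent
 * allowing cpus 0-3 passes the mask tests; but marking that child
 * cpu_exclusive while its parent is not fails the
 * is_cpu_exclusive(p) <= is_cpu_exclusive(q) comparison above.
 */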
/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(c, &cur->children, sibling) {
		if (!is_cpuset_subset(c, trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if ((par = cur->parent) == NULL)
		return 0;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(c, &par->children, sibling) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}
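
/*
 * Illustrative example of the sibling rule: with siblings A (cpus 0-3)
 * and B (cpus 2-5), a request to set cpu_exclusive on A is rejected,
 * because A's cpus_allowed intersects its sibling B's.
 */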
/*
 * For a given cpuset cur, partition the system as follows
 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains
 *
 * Call with manage_mutex held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 */

static void update_cpu_domains(struct cpuset *cur)
{
	struct cpuset *c, *par = cur->parent;
	cpumask_t pspan, cspan;

	if (par == NULL || cpus_empty(cur->cpus_allowed))
		return;

	/*
	 * Get all cpus from parent's cpus_allowed not part of exclusive
	 * children
	 */
	pspan = par->cpus_allowed;
	list_for_each_entry(c, &par->children, sibling) {
		if (is_cpu_exclusive(c))
			cpus_andnot(pspan, pspan, c->cpus_allowed);
	}
	if (is_removed(cur) || !is_cpu_exclusive(cur)) {
		cpus_or(pspan, pspan, cur->cpus_allowed);
		if (cpus_equal(pspan, cur->cpus_allowed))
			return;
		cspan = CPU_MASK_NONE;
	} else {
		if (cpus_empty(pspan))
			return;
		cspan = cur->cpus_allowed;
		/*
		 * Get all cpus from current cpuset's cpus_allowed not part
		 * of exclusive children
		 */
		list_for_each_entry(c, &cur->children, sibling) {
			if (is_cpu_exclusive(c))
				cpus_andnot(cspan, cspan, c->cpus_allowed);
		}
	}

	lock_cpu_hotplug();
	partition_sched_domains(&pspan, &cspan);
	unlock_cpu_hotplug();
}
/*
 * Call with manage_mutex held.  May take callback_mutex during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval, cpus_unchanged;

	trialcs = *cs;
	retval = cpulist_parse(buf, trialcs.cpus_allowed);
	if (retval < 0)
		return retval;
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	if (cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
	mutex_lock(&callback_mutex);
	cs->cpus_allowed = trialcs.cpus_allowed;
	mutex_unlock(&callback_mutex);
	if (is_cpu_exclusive(cs) && !cpus_unchanged)
		update_cpu_domains(cs);
	return 0;
}
/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the task's
 * pages to the new memory.
 *
 * Call with manage_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	nodemask_t oldmem;
	struct task_struct *g, *p;
	struct mm_struct **mmarray;
	int i, n, ntasks;
	int migrate;
	int fudge;
	int retval;

	trialcs = *cs;
	retval = nodelist_parse(buf, trialcs.mems_allowed);
	if (retval < 0)
		goto done;
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
	oldmem = cs->mems_allowed;
	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	if (nodes_empty(trialcs.mems_allowed)) {
		retval = -ENOSPC;
		goto done;
	}
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		goto done;

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs.mems_allowed;
	atomic_inc(&cpuset_mems_generation);
	cs->mems_generation = atomic_read(&cpuset_mems_generation);
	mutex_unlock(&callback_mutex);

	set_cpuset_being_rebound(cs);		/* causes mpol_copy() rebind */

	fudge = 10;				/* spare mmarray[] slots */
	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */

	/*
	 * Allocate mmarray[] to hold mm reference for each task
	 * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
	 * few more lines of code, we can retry until we get a big
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
	while (1) {
		ntasks = atomic_read(&cs->count);	/* guess */
		ntasks += fudge;
		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
		if (!mmarray)
			goto done;
		write_lock_irq(&tasklist_lock);		/* block fork */
		if (atomic_read(&cs->count) <= ntasks)
			break;				/* got enough */
		write_unlock_irq(&tasklist_lock);	/* try again */
		kfree(mmarray);
	}

	n = 0;

	/* Load up mmarray[] with mm reference for each task in cpuset. */
	do_each_thread(g, p) {
		struct mm_struct *mm;

		if (n >= ntasks) {
			printk(KERN_WARNING
				"Cpuset mempolicy rebind incomplete.\n");
			continue;
		}
		if (p->cpuset != cs)
			continue;
		mm = get_task_mm(p);
		if (!mm)
			continue;
		mmarray[n++] = mm;
	} while_each_thread(g, p);
	write_unlock_irq(&tasklist_lock);

	/*
	 * Now that we've dropped the tasklist spinlock, we can
	 * rebind the vma mempolicies of each mm in mmarray[] to their
	 * new cpuset, and release that mm.  The mpol_rebind_mm()
	 * call takes mmap_sem, which we couldn't take while holding
	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
	 * cpuset_being_rebound check will catch such forks, and rebind
	 * their vma mempolicies too.  Because we still hold the global
	 * cpuset manage_mutex, we know that no other rebind effort will
	 * be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	migrate = is_memory_migrate(cs);
	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate) {
			do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
							MPOL_MF_MOVE_ALL);
		}
		mmput(mm);
	}

	/* We're done rebinding vma's to this cpuset's new mems_allowed. */
	kfree(mmarray);
	set_cpuset_being_rebound(NULL);
	retval = 0;
done:
	return retval;
}
/*
 * Call with manage_mutex held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
	if (simple_strtoul(buf, NULL, 10) != 0)
		cpuset_memory_pressure_enabled = 1;
	else
		cpuset_memory_pressure_enabled = 0;
	return 0;
}
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_mutex held.
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err, cpu_exclusive_changed;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	cpu_exclusive_changed =
		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
	mutex_lock(&callback_mutex);
	if (turning_on)
		set_bit(bit, &cs->flags);
	else
		clear_bit(bit, &cs->flags);
	mutex_unlock(&callback_mutex);

	if (cpu_exclusive_changed)
		update_cpu_domains(cs);
	return 0;
}
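
/*
 * Illustrative example: "echo 1 > cpu_exclusive" arrives here as
 * update_flag(CS_CPU_EXCLUSIVE, cs, "1"); the trial copy is
 * validated, the bit is set under callback_mutex, and since
 * cpu_exclusive changed, the sched domains are repartitioned.
 */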
/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stabilized.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * events.
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */
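
/*
 * Worked example (illustrative): at a steady one event per second,
 * each one-second tick in fmeter_update() computes
 *
 *	val  = (933 * val) / 1000;		   decay old value
 *	val += ((1000 - 933) * 1000) / 1000;	   fold in cnt == 1000
 *
 * which converges on val == 1000, i.e. N * 1000 for N events/sec.
 */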
/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	if (ticks == 0)
		return;

	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}
/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}
/*
 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
 * notified on release.
 *
 * Call holding manage_mutex.  May take callback_mutex and task_lock of
 * the task 'pid' during call.
 */

static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
{
	pid_t pid;
	struct task_struct *tsk;
	struct cpuset *oldcs;
	cpumask_t cpus;
	nodemask_t from, to;
	struct mm_struct *mm;

	if (sscanf(pidbuf, "%d", &pid) != 1)
		return -EIO;
	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	if (pid) {
		read_lock(&tasklist_lock);

		tsk = find_task_by_pid(pid);
		if (!tsk || tsk->flags & PF_EXITING) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}

		get_task_struct(tsk);
		read_unlock(&tasklist_lock);

		if ((current->euid) && (current->euid != tsk->uid)
		    && (current->euid != tsk->suid)) {
			put_task_struct(tsk);
			return -EACCES;
		}
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	mutex_lock(&callback_mutex);

	task_lock(tsk);
	oldcs = tsk->cpuset;
	if (!oldcs) {
		task_unlock(tsk);
		mutex_unlock(&callback_mutex);
		put_task_struct(tsk);
		return -ESRCH;
	}
	atomic_inc(&cs->count);
	rcu_assign_pointer(tsk->cpuset, cs);
	task_unlock(tsk);

	guarantee_online_cpus(cs, &cpus);
	set_cpus_allowed(tsk, cpus);

	from = oldcs->mems_allowed;
	to = cs->mems_allowed;

	mutex_unlock(&callback_mutex);

	mm = get_task_mm(tsk);
	if (mm) {
		mpol_rebind_mm(mm, &to);
		mmput(mm);
	}

	if (is_memory_migrate(cs))
		do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
	put_task_struct(tsk);
	synchronize_rcu();	/* let stale task->cpuset readers drain */
	if (atomic_dec_and_test(&oldcs->count))
		check_for_release(oldcs, ppathbuf);
	return 0;
}
/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_ROOT,
	FILE_DIR,
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_NOTIFY_ON_RELEASE,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_TASKLIST,
} cpuset_filetype_t;
static ssize_t cpuset_common_file_write(struct file *file,
					const char __user *userbuf,
					size_t nbytes, loff_t *unused_ppos)
{
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	struct cftype *cft = __d_cft(file->f_dentry);
	cpuset_filetype_t type = cft->private;
	char *buffer;
	char *pathbuf = NULL;
	int retval = 0;

	/* Crude upper limit on largest legitimate cpulist user might write. */
	if (nbytes > 100 + 6 * NR_CPUS)
		return -E2BIG;

	/* +1 for nul-terminator */
	if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
		return -ENOMEM;

	if (copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out1;
	}
	buffer[nbytes] = 0;	/* nul-terminate */

	mutex_lock(&manage_mutex);

	if (is_removed(cs)) {
		retval = -ENODEV;
		goto out2;
	}

	switch (type) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, buffer);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, buffer);
		break;
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
		break;
	case FILE_NOTIFY_ON_RELEASE:
		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
		break;
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		retval = update_memory_pressure_enabled(cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE:
		retval = -EACCES;
		break;
	case FILE_TASKLIST:
		retval = attach_task(cs, buffer, &pathbuf);
		break;
	default:
		retval = -EINVAL;
		goto out2;
	}

	if (retval == 0)
		retval = nbytes;
out2:
	mutex_unlock(&manage_mutex);
	cpuset_release_agent(pathbuf);
out1:
	kfree(buffer);
	return retval;
}
static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;

	/* special function ? */
	if (cft->write)
		retval = cft->write(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_write(file, buf, nbytes, ppos);

	return retval;
}
/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	cpumask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->cpus_allowed;
	mutex_unlock(&callback_mutex);

	return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->mems_allowed;
	mutex_unlock(&callback_mutex);

	return nodelist_scnprintf(page, PAGE_SIZE, mask);
}
static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
					size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;

	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	case FILE_CPU_EXCLUSIVE:
		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
		break;
	case FILE_MEM_EXCLUSIVE:
		*s++ = is_mem_exclusive(cs) ? '1' : '0';
		break;
	case FILE_NOTIFY_ON_RELEASE:
		*s++ = notify_on_release(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_MIGRATE:
		*s++ = is_memory_migrate(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE:
		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
	free_page((unsigned long)page);
	return retval;
}
static ssize_t cpuset_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;

	/* special function ? */
	if (cft->read)
		retval = cft->read(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_read(file, buf, nbytes, ppos);

	return retval;
}
static int cpuset_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;
	if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cpuset_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}
/*
 * cpuset_rename - Only allow simple rename of directories in place.
 */
static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}
static struct file_operations cpuset_file_operations = {
	.read = cpuset_file_read,
	.write = cpuset_file_write,
	.llseek = generic_file_llseek,
	.open = cpuset_file_open,
	.release = cpuset_file_release,
};

static struct inode_operations cpuset_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cpuset_mkdir,
	.rmdir = cpuset_rmdir,
	.rename = cpuset_rename,
};
static int cpuset_create_file(struct dentry *dentry, int mode)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cpuset_new_inode(mode);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cpuset_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else if (S_ISREG(mode)) {
		inode->i_fop = &cpuset_file_operations;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}
/*
 *	cpuset_create_dir - create a directory for an object.
 *	cs:	the cpuset we create the directory for.
 *		It must have a valid ->parent field
 *		And we are going to fill its ->dentry field.
 *	name:	The name to give to the cpuset directory. Will be copied.
 *	mode:	mode to set on new directory.
 */

static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
{
	struct dentry *dentry = NULL;
	struct dentry *parent;
	int error = 0;

	parent = cs->parent->dentry;
	dentry = cpuset_get_dentry(parent, name);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	error = cpuset_create_file(dentry, S_IFDIR | mode);
	if (!error) {
		dentry->d_fsdata = cs;
		parent->d_inode->i_nlink++;
		cs->dentry = dentry;
	}
	dput(dentry);

	return error;
}
static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
{
	struct dentry *dentry;
	int error;

	mutex_lock(&dir->d_inode->i_mutex);
	dentry = cpuset_get_dentry(dir, cft->name);
	if (!IS_ERR(dentry)) {
		error = cpuset_create_file(dentry, 0644 | S_IFREG);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	mutex_unlock(&dir->d_inode->i_mutex);
	return error;
}
/*
 * Stuff for reading the 'tasks' file.
 *
 * Reading this file can return large amounts of data if a cpuset has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 * Upon tasks file open(), a struct ctr_struct is allocated, that
 * will have a pointer to an array (also allocated here).  The struct
 * ctr_struct * is stored in file->private_data.  Its resources will
 * be freed by release() when the file is closed.  The array is used
 * to sprintf the PIDs and then used by read().
 */

/* cpusets_tasks_read array */

struct ctr_struct {
	char *buf;
	int bufsz;
};

/*
 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
 * Return actual number of pids loaded.  No need to task_lock(p)
 * when reading out p->cpuset, as we don't really care if it changes
 * on the next cycle, and we are not going to try to dereference it.
 */
static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
{
	int n = 0;
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);

	do_each_thread(g, p) {
		if (p->cpuset == cs) {
			pidarray[n++] = p->pid;
			if (unlikely(n == npids))
				goto array_full;
		}
	} while_each_thread(g, p);

array_full:
	read_unlock(&tasklist_lock);
	return n;
}
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
/*
 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
 * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
 * count 'cnt' of how many chars would be written if buf were large enough.
 */
static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
{
	int cnt = 0;
	int i;

	for (i = 0; i < npids; i++)
		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
	return cnt;
}
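
/*
 * Illustrative example: for pids {3, 12, 245} this produces the
 * string "3\n12\n245\n" and returns 9, the full length, even when
 * only sz characters actually fit in buf.
 */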
/*
 * Handle an open on 'tasks' file.  Prepare a buffer listing the
 * process id's of tasks currently attached to the cpuset being opened.
 *
 * Does not require any specific cpuset mutexes, and does not take any.
 */
static int cpuset_tasks_open(struct inode *unused, struct file *file)
{
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	struct ctr_struct *ctr;
	pid_t *pidarray;
	int npids;
	char c;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		goto err0;

	/*
	 * If cpuset gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cpuset users didn't
	 * show up until sometime later on.
	 */
	npids = atomic_read(&cs->count);
	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
	if (!pidarray)
		goto err1;

	npids = pid_array_load(pidarray, npids, cs);
	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);

	/* Call pid_array_to_buf() twice, first just to get bufsz */
	ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
	ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
	if (!ctr->buf)
		goto err2;
	ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);

	kfree(pidarray);
	file->private_data = ctr;
	return 0;

err2:
	kfree(pidarray);
err1:
	kfree(ctr);
err0:
	return -ENOMEM;
}
static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	struct ctr_struct *ctr = file->private_data;

	if (*ppos + nbytes > ctr->bufsz)
		nbytes = ctr->bufsz - *ppos;
	if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
		return -EFAULT;
	*ppos += nbytes;
	return nbytes;
}

static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
{
	struct ctr_struct *ctr;

	if (file->f_mode & FMODE_READ) {
		ctr = file->private_data;
		kfree(ctr->buf);
		kfree(ctr);
	}
	return 0;
}
/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_tasks = {
	.name = "tasks",
	.open = cpuset_tasks_open,
	.read = cpuset_tasks_read,
	.release = cpuset_tasks_release,
	.private = FILE_TASKLIST,
};

static struct cftype cft_cpus = {
	.name = "cpus",
	.private = FILE_CPULIST,
};

static struct cftype cft_mems = {
	.name = "mems",
	.private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
	.name = "cpu_exclusive",
	.private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
	.name = "mem_exclusive",
	.private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_notify_on_release = {
	.name = "notify_on_release",
	.private = FILE_NOTIFY_ON_RELEASE,
};

static struct cftype cft_memory_migrate = {
	.name = "memory_migrate",
	.private = FILE_MEMORY_MIGRATE,
};

static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};

static struct cftype cft_memory_pressure = {
	.name = "memory_pressure",
	.private = FILE_MEMORY_PRESSURE,
};
static int cpuset_populate_dir(struct dentry *cs_dentry)
{
	int err;

	if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
		return err;
	return 0;
}
/*
 *	cpuset_create - create a cpuset
 *	parent:	cpuset that will be parent of the new cpuset.
 *	name:	name of the new cpuset. Will be strcpy'ed.
 *	mode:	mode to set on new inode
 *
 *	Must be called with the mutex on the parent inode held
 */

static long cpuset_create(struct cpuset *parent, const char *name, int mode)
{
	struct cpuset *cs;
	int err;

	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	mutex_lock(&manage_mutex);
	cpuset_update_task_memory_state();
	cs->flags = 0;
	if (notify_on_release(parent))
		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
	cs->cpus_allowed = CPU_MASK_NONE;
	cs->mems_allowed = NODE_MASK_NONE;
	atomic_set(&cs->count, 0);
	INIT_LIST_HEAD(&cs->sibling);
	INIT_LIST_HEAD(&cs->children);
	atomic_inc(&cpuset_mems_generation);
	cs->mems_generation = atomic_read(&cpuset_mems_generation);
	fmeter_init(&cs->fmeter);

	cs->parent = parent;

	mutex_lock(&callback_mutex);
	list_add(&cs->sibling, &cs->parent->children);
	number_of_cpusets++;
	mutex_unlock(&callback_mutex);

	err = cpuset_create_dir(cs, name, mode);
	if (err < 0)
		goto err;

	/*
	 * Release manage_mutex before cpuset_populate_dir() because it
	 * will down() this new directory's i_mutex and if we race with
	 * another mkdir, we might deadlock.
	 */
	mutex_unlock(&manage_mutex);

	err = cpuset_populate_dir(cs->dentry);
	/* If err < 0, we have a half-filled directory - oh well ;) */
	return 0;
err:
	list_del(&cs->sibling);
	mutex_unlock(&manage_mutex);
	kfree(cs);
	return err;
}
static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cpuset *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cpuset *cs = dentry->d_fsdata;
	struct dentry *d;
	struct cpuset *parent;
	char *pathbuf = NULL;

	/* the vfs holds both inode->i_mutex already */

	mutex_lock(&manage_mutex);
	cpuset_update_task_memory_state();
	if (atomic_read(&cs->count) > 0) {
		mutex_unlock(&manage_mutex);
		return -EBUSY;
	}
	if (!list_empty(&cs->children)) {
		mutex_unlock(&manage_mutex);
		return -EBUSY;
	}
	parent = cs->parent;
	mutex_lock(&callback_mutex);
	set_bit(CS_REMOVED, &cs->flags);
	if (is_cpu_exclusive(cs))
		update_cpu_domains(cs);
	list_del(&cs->sibling);	/* delete my sibling from parent->children */
	spin_lock(&cs->dentry->d_lock);
	d = dget(cs->dentry);
	cs->dentry = NULL;
	spin_unlock(&d->d_lock);
	cpuset_d_remove_dir(d);
	dput(d);
	number_of_cpusets--;
	mutex_unlock(&callback_mutex);
	if (list_empty(&parent->children))
		check_for_release(parent, &pathbuf);
	mutex_unlock(&manage_mutex);
	cpuset_release_agent(pathbuf);
	return 0;
}
/*
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 */

int __init cpuset_init_early(void)
{
	struct task_struct *tsk = current;

	tsk->cpuset = &top_cpuset;
	tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation);
	return 0;
}
/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 **/

int __init cpuset_init(void)
{
	struct dentry *root;
	int err;

	top_cpuset.cpus_allowed = CPU_MASK_ALL;
	top_cpuset.mems_allowed = NODE_MASK_ALL;

	fmeter_init(&top_cpuset.fmeter);
	atomic_inc(&cpuset_mems_generation);
	top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);

	init_task.cpuset = &top_cpuset;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		goto out;
	cpuset_mount = kern_mount(&cpuset_fs_type);
	if (IS_ERR(cpuset_mount)) {
		printk(KERN_ERR "cpuset: could not mount!\n");
		err = PTR_ERR(cpuset_mount);
		cpuset_mount = NULL;
		goto out;
	}
	root = cpuset_mount->mnt_sb->s_root;
	root->d_fsdata = &top_cpuset;
	root->d_inode->i_nlink++;
	top_cpuset.dentry = root;
	root->d_inode->i_op = &cpuset_dir_inode_operations;
	number_of_cpusets = 1;
	err = cpuset_populate_dir(root);
	/* memory_pressure_enabled is in root cpuset only */
	if (!err)
		err = cpuset_add_file(root, &cft_memory_pressure_enabled);
out:
	return err;
}
/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
	top_cpuset.cpus_allowed = cpu_online_map;
	top_cpuset.mems_allowed = node_online_map;
}
/**
 * cpuset_fork - attach newly forked task to its parent's cpuset.
 * @child: pointer to task_struct of the newly forked child task.
 *
 * Description: A task inherits its parent's cpuset at fork().
 *
 * A pointer to the shared cpuset was automatically copied in fork.c
 * by dup_task_struct().  However, we ignore that copy, since it was
 * not made under the protection of task_lock(), so might no longer be
 * a valid cpuset pointer.  attach_task() might have already changed
 * current->cpuset, allowing the previously referenced cpuset to
 * be removed and freed.  Instead, we task_lock(current) and copy
 * its present value of current->cpuset for our freshly forked child.
 *
 * At the point that cpuset_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 **/

void cpuset_fork(struct task_struct *child)
{
	task_lock(current);
	child->cpuset = current->cpuset;
	atomic_inc(&child->cpuset->count);
	task_unlock(current);
}
/**
 * cpuset_exit - detach cpuset from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cpuset from @tsk and release it.
 *
 * Note that cpusets marked notify_on_release force every task in
 * them to take the global manage_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cpusets where very high task exit scaling
 * is required on large systems.
 *
 * Don't even think about dereferencing 'cs' after the cpuset use count
 * goes to zero, except inside a critical section guarded by manage_mutex
 * or callback_mutex.  Otherwise a zero cpuset use count is a license to
 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
 *
 * This routine has to take manage_mutex, not callback_mutex, because
 * it is holding that mutex while calling check_for_release(),
 * which calls kmalloc(), so can't be called holding callback_mutex.
 *
 * We don't need to task_lock() this reference to tsk->cpuset,
 * because tsk is already marked PF_EXITING, so attach_task() won't
 * mess with it, or task is a failed fork, never visible to attach_task.
 *
 * Hack:
 *
 * Set the exiting task's cpuset to the root cpuset (top_cpuset).
 *
 * Don't leave a task unable to allocate memory, as that is an
 * accident waiting to happen should someone add a callout in
 * do_exit() after the cpuset_exit() call that might allocate.
 * If a task tries to allocate memory with an invalid cpuset,
 * it will oops in cpuset_update_task_memory_state().
 *
 * We call cpuset_exit() while the task is still competent to
 * handle notify_on_release(), then leave the task attached to
 * the root cpuset (top_cpuset) for the remainder of its exit.
 *
 * To do this properly, we would increment the reference count on
 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
 * code we would add a second cpuset function call, to drop that
 * reference.  This would just create an unnecessary hot spot on
 * the top_cpuset reference count, to no avail.
 *
 * Normally, holding a reference to a cpuset without bumping its
 * count is unsafe.  The cpuset could go away, or someone could
 * attach us to a different cpuset, decrementing the count on
 * the first cpuset that we never incremented.  But in this case,
 * top_cpuset isn't going away, and either task has PF_EXITING set,
 * which wards off any attach_task() attempts, or task is a failed
 * fork, never visible to attach_task.
 *
 * Another way to do this would be to set the cpuset pointer
 * to NULL here, and check in cpuset_update_task_memory_state()
 * for a NULL pointer.  This hack avoids that NULL check, for no
 * cost (other than this way too long comment ;).
 **/

void cpuset_exit(struct task_struct *tsk)
{
	struct cpuset *cs;

	cs = tsk->cpuset;
	tsk->cpuset = &top_cpuset;	/* Hack - see comment above */

	if (notify_on_release(cs)) {
		char *pathbuf = NULL;

		mutex_lock(&manage_mutex);
		if (atomic_dec_and_test(&cs->count))
			check_for_release(cs, &pathbuf);
		mutex_unlock(&manage_mutex);
		cpuset_release_agent(pathbuf);
	} else {
		atomic_dec(&cs->count);
	}
}
2030 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2031 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2033 * Description: Returns the cpumask_t cpus_allowed of the cpuset
2034 * attached to the specified @tsk. Guaranteed to return some non-empty
2035 * subset of cpu_online_map, even if this means going outside the
cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
	cpumask_t mask;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_cpus(tsk->cpuset, &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}

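/*
 * Typical caller pattern (a sketch after kernel/sched.c's
 * sched_setaffinity(); the code there may differ in detail): clamp a
 * user supplied affinity mask to what the task's cpuset permits
 * before applying it.
 *
 *	cpumask_t cpus_allowed = cpuset_cpus_allowed(p);
 *	cpus_and(new_mask, new_mask, cpus_allowed);
 *	retval = set_cpus_allowed(p, new_mask);
 */
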
void cpuset_init_current_mems_allowed(void)
{
	current->mems_allowed = NODE_MASK_ALL;
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_online_map, even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_mems(tsk->cpuset, &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}

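/*
 * Analogous caller pattern (purely illustrative sketch, not a
 * specific in-tree caller): clamp a candidate nodemask to the memory
 * nodes the task's cpuset permits.
 *
 *	nodemask_t allowed = cpuset_mems_allowed(p);
 *	nodes_and(nodes, nodes, allowed);
 */
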
/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */

int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
	int i;

	for (i = 0; zl->zones[i]; i++) {
		int nid = zl->zones[i]->zone_pgdat->node_id;

		if (node_isset(nid, current->mems_allowed))
			return 1;
	}
	return 0;
}

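/*
 * Example caller (a sketch after mm/mempolicy.c's handling of
 * MPOL_BIND; the exact code there may differ): before handing back a
 * policy's private zonelist, verify the cpuset still allows at least
 * one of its nodes, since current->mems_allowed may have moved since
 * the policy was created.
 *
 *	if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
 *		return policy->v.zonelist;
 */
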
/*
 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
 * ancestor to the specified cpuset.  Call holding callback_mutex.
 * If no ancestor is mem_exclusive (an unusual configuration), then
 * returns the root cpuset.
 */

static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
{
	while (!is_mem_exclusive(cs) && cs->parent)
		cs = cs->parent;
	return cs;
}

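/*
 * Worked illustration (hypothetical hierarchy, not real code):
 *
 *	top_cpuset		(not mem_exclusive, parent == NULL)
 *	`-- A			(mem_exclusive)
 *	    `-- B		(not mem_exclusive)
 *
 * nearest_exclusive_ancestor(B) walks B -> A and returns A;
 * nearest_exclusive_ancestor(A) returns A itself; and on a chain with
 * no mem_exclusive member the walk stops at top_cpuset, whose parent
 * is NULL.
 */
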
/**
 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
 *
 * If we're in interrupt, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * mem_exclusive cpuset ancestor to this task's cpuset, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest mem_exclusive ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The __alloc_pages()
 * routine only calls here with the __GFP_HARDWALL bit _not_ set if
 * it's a GFP_KERNEL allocation, and all nodes in the current task's
 * mems_allowed came up empty on the first pass over the zonelist.
 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
 * short of memory, might require taking the callback_mutex mutex.
 *
 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
 * hardwall cpusets - no allocation on a node outside the cpuset is
 * allowed (unless in interrupt, of course).
 *
 * The second loop doesn't even call here for GFP_ATOMIC requests
 * (it only calls here when the __alloc_pages() local variable 'wait'
 * is set).  That check and the checks below have the combined effect
 * in the second loop of the __alloc_pages() routine that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 **/

int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	int node;			/* node that zone z is on */
	const struct cpuset *cs;	/* current cpuset ancestors */
	int allowed = 1;		/* is allocation in zone z allowed? */

	if (in_interrupt())
		return 1;
	node = z->zone_pgdat->node_id;
	if (node_isset(node, current->mems_allowed))
		return 1;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return 0;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return 1;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	mutex_lock(&callback_mutex);

	task_lock(current);
	cs = nearest_exclusive_ancestor(current->cpuset);
	task_unlock(current);

	allowed = node_isset(node, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
	return allowed;
}

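/*
 * For context, callers reach this routine through a wrapper in
 * include/linux/cpuset.h that short circuits the common single-cpuset
 * case (sketch; see the header for the authoritative version):
 *
 *	static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 *	{
 *		return number_of_cpusets <= 1 ||
 *			__cpuset_zone_allowed(z, gfp_mask);
 *	}
 *
 * With only the root cpuset defined, no locking or ancestor scan is
 * ever needed on the page allocation fast path.
 */
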
/**
 * cpuset_lock - lock out any changes to cpuset structures
 *
 * The out of memory (oom) code needs to keep cpusets from being
 * changed while it scans the tasklist looking for a task in an
 * overlapping cpuset.  Expose callback_mutex via this cpuset_lock()
 * routine, so the oom code can lock it before locking the task list.
 * The tasklist_lock is a spinlock, so must be taken inside
 * callback_mutex.
 */

void cpuset_lock(void)
{
	mutex_lock(&callback_mutex);
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
	mutex_unlock(&callback_mutex);
}

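/*
 * Expected pairing (a sketch after mm/oom_kill.c's out_of_memory();
 * details there may vary): take callback_mutex via cpuset_lock()
 * before the tasklist_lock spinlock, never the other way around.
 *
 *	cpuset_lock();
 *	read_lock(&tasklist_lock);
 *	p = select_bad_process();  // may call cpuset_excl_nodes_overlap()
 *	read_unlock(&tasklist_lock);
 *	cpuset_unlock();
 */
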
/**
 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
 * @p: pointer to task_struct of some other task.
 *
 * Description: Return true if the nearest mem_exclusive ancestor
 * cpusets of tasks @p and current overlap.  Used by the oom killer to
 * determine if task @p's memory usage might impact the memory
 * available to the current task.
 *
 * Call while holding callback_mutex.
 **/

int cpuset_excl_nodes_overlap(const struct task_struct *p)
{
	const struct cpuset *cs1, *cs2;	/* my and p's cpuset ancestors */
	int overlap = 0;		/* do cpusets overlap? */

	task_lock(current);
	if (current->flags & PF_EXITING) {
		task_unlock(current);
		goto done;
	}
	cs1 = nearest_exclusive_ancestor(current->cpuset);
	task_unlock(current);

	task_lock((struct task_struct *)p);
	if (p->flags & PF_EXITING) {
		task_unlock((struct task_struct *)p);
		goto done;
	}
	cs2 = nearest_exclusive_ancestor(p->cpuset);
	task_unlock((struct task_struct *)p);

	overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
done:
	return overlap;
}

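/*
 * Example consumer (a sketch after mm/oom_kill.c's badness() scoring;
 * the divisor shown is illustrative): a task whose mem_exclusive
 * nodes don't overlap ours is a less attractive oom-kill victim,
 * since killing it frees little memory we can actually use.
 *
 *	if (!cpuset_excl_nodes_overlap(p))
 *		points /= 8;
 */
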
/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	struct cpuset *cs;

	task_lock(current);
	cs = current->cpuset;
	fmeter_markevent(&cs->fmeter);
	task_unlock(current);
}

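/*
 * Callers go through a wrapper macro (a sketch of the
 * include/linux/cpuset.h version; see the header for the
 * authoritative definition), so the task_lock is only taken when
 * collection has been enabled in the root cpuset:
 *
 *	#define cpuset_memory_pressure_bump()			\
 *		do {						\
 *			if (cpuset_memory_pressure_enabled)	\
 *				__cpuset_memory_pressure_bump();\
 *		} while (0)
 */
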
/*
 * proc_cpuset_show()
 *  - Print tasks cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take manage_mutex, keeping attach_task() from changing it
 *    anyway.  No need to check that tsk->cpuset != NULL, thanks to
 *    the hack in cpuset_exit() (see its comment), which sets an
 *    exiting task's cpuset to top_cpuset.
 */

static int proc_cpuset_show(struct seq_file *m, void *v)
{
	struct cpuset *cs;
	struct task_struct *tsk;
	char *buf;
	int retval = 0;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tsk = m->private;
	mutex_lock(&manage_mutex);
	cs = tsk->cpuset;
	if (!cs) {
		retval = -EINVAL;
		goto out;
	}

	retval = cpuset_path(cs, buf, PAGE_SIZE);
	if (retval < 0)
		goto out;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out:
	mutex_unlock(&manage_mutex);
	kfree(buf);
	return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
	struct task_struct *tsk = PROC_I(inode)->task;
	return single_open(file, proc_cpuset_show, tsk);
}

struct file_operations proc_cpuset_operations = {
	.open		= cpuset_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

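/*
 * From user space (illustrative shell session; the path printed
 * depends on which cpusets exist and which one the task is in):
 *
 *	$ cat /proc/self/cpuset
 *	/batch/job17
 *
 * The path is relative to the cpuset filesystem mount point.
 */
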
/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
	buffer += sprintf(buffer, "Cpus_allowed:\t");
	buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
	buffer += sprintf(buffer, "\n");
	buffer += sprintf(buffer, "Mems_allowed:\t");
	buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
	buffer += sprintf(buffer, "\n");
	return buffer;
}