kernel/res_counter.c

/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
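
/*
 * Initialize a counter: no limit or soft limit is set (RES_COUNTER_MAX
 * means "unlimited"), and the counter is linked to its parent, which is
 * NULL for a root counter.
 */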
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RES_COUNTER_MAX;
	counter->soft_limit = RES_COUNTER_MAX;
	counter->parent = parent;
}
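
/*
 * Charge @val against a single counter.  The caller must hold
 * counter->lock.  If the charge would push usage above the limit, the
 * failure count is bumped and -ENOMEM is returned; with @force the
 * charge is applied anyway and the error is only reported.
 */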
int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
			      bool force)
{
	int ret = 0;

	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		ret = -ENOMEM;
		if (!force)
			return ret;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return ret;
}
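
/*
 * Charge @val against @counter and every counter above it in the
 * hierarchy.  On failure, *limit_fail_at points at the counter that hit
 * its limit and, unless @force is set, all charges taken so far are
 * rolled back.
 */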
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
				struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}
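
/*
 * res_counter_charge() fails as soon as any counter in the hierarchy is
 * over its limit; res_counter_charge_nofail() applies the charge
 * everywhere regardless and only reports the failure.
 */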
int res_counter_charge(struct res_counter *counter, unsigned long val,
		       struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, false);
}

int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, true);
}
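
/*
 * Remove @val from a single counter.  The caller must hold
 * counter->lock.  Uncharging more than is currently charged triggers a
 * warning and clamps the usage at zero.  Returns the remaining usage.
 */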
u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
	return counter->usage;
}
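
/*
 * Uncharge @val from @counter and each of its ancestors, stopping
 * before @top (NULL means the whole hierarchy).  Returns the remaining
 * usage of @counter itself.
 */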
u64 res_counter_uncharge_until(struct res_counter *counter,
			       struct res_counter *top,
			       unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;
	u64 ret = 0;

	local_irq_save(flags);
	for (c = counter; c != top; c = c->parent) {
		u64 r;

		spin_lock(&c->lock);
		r = res_counter_uncharge_locked(c, val);
		if (c == counter)
			ret = r;
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
	return ret;
}

u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	return res_counter_uncharge_until(counter, NULL, val);
}
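
/*
 * Map a RES_* member id onto the corresponding field of the counter.
 */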
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}
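
/*
 * Format one counter member for a userspace read, either through the
 * caller-supplied @read_strategy or as a plain decimal value.
 */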
ssize_t res_counter_read(struct res_counter *counter, int member,
			 const char __user *userbuf, size_t nbytes, loff_t *pos,
			 int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
				       pos, buf, s - buf);
}
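
/*
 * On 32-bit kernels the 64-bit members cannot be read atomically, so
 * take the counter lock; on 64-bit a single load is sufficient.
 */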
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
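
/*
 * Parse a limit string written by userspace: "-1" means unlimited,
 * anything else is passed to memparse() and rounded up to a page
 * boundary (overflow falls back to RES_COUNTER_MAX).
 */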
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *resp)
{
	char *end;
	unsigned long long res;

	/* return RES_COUNTER_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		res = simple_strtoull(buf + 1, &end, 10);
		if (res != 1 || *end != '\0')
			return -EINVAL;
		*resp = RES_COUNTER_MAX;
		return 0;
	}

	res = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	if (PAGE_ALIGN(res) >= res)
		res = PAGE_ALIGN(res);
	else
		res = RES_COUNTER_MAX;

	*resp = res;

	return 0;
}