/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
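
/*
 * Initialize a counter: no limit or soft limit by default, optionally
 * linked to a parent counter to form a hierarchy.
 */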
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
        spin_lock_init(&counter->lock);
        counter->limit = RESOURCE_MAX;
        counter->soft_limit = RESOURCE_MAX;
        counter->parent = parent;
}
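
/*
 * Charge @val to a single counter; the caller must hold counter->lock.
 * If the limit would be exceeded, failcnt is bumped and -ENOMEM returned;
 * with @force the charge is applied anyway and the error still reported.
 */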
int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
                              bool force)
{
        int ret = 0;

        if (counter->usage + val > counter->limit) {
                counter->failcnt++;
                ret = -ENOMEM;
                if (!force)
                        return ret;
        }

        counter->usage += val;
        if (counter->usage > counter->max_usage)
                counter->max_usage = counter->usage;
        return ret;
}
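
/*
 * Charge @val to @counter and every ancestor in the hierarchy, taking each
 * level's lock in turn with IRQs disabled.  If a level fails and @force is
 * not set, the charges taken so far are rolled back and *limit_fail_at is
 * set to the counter that hit its limit.
 */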
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
                                struct res_counter **limit_fail_at, bool force)
{
        int ret, r;
        unsigned long flags;
        struct res_counter *c, *u;

        r = ret = 0;
        *limit_fail_at = NULL;
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                r = res_counter_charge_locked(c, val, force);
                spin_unlock(&c->lock);
                if (r < 0 && !ret) {
                        ret = r;
                        *limit_fail_at = c;
                        if (!force)
                                break;
                }
        }

        if (ret < 0 && !force) {
                for (u = counter; u != c; u = u->parent) {
                        spin_lock(&u->lock);
                        res_counter_uncharge_locked(u, val);
                        spin_unlock(&u->lock);
                }
        }
        local_irq_restore(flags);

        return ret;
}
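
/*
 * Charge @val hierarchically; fails (and rolls back) if any level is over
 * its limit.
 */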
int res_counter_charge(struct res_counter *counter, unsigned long val,
                       struct res_counter **limit_fail_at)
{
        return __res_counter_charge(counter, val, limit_fail_at, false);
}
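
/*
 * Charge @val hierarchically even when a limit is exceeded; the return
 * value still reports whether a limit was hit.
 */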
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
                              struct res_counter **limit_fail_at)
{
        return __res_counter_charge(counter, val, limit_fail_at, true);
}
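
/*
 * Remove @val from a single counter; the caller must hold counter->lock.
 * An underflow is warned about and clamped so usage drops to zero.
 */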
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
        if (WARN_ON(counter->usage < val))
                val = counter->usage;

        counter->usage -= val;
}
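
/*
 * Uncharge @val from @counter and its ancestors, stopping before @top
 * (pass NULL to walk the whole hierarchy).
 */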
void res_counter_uncharge_until(struct res_counter *counter,
                                struct res_counter *top,
                                unsigned long val)
{
        unsigned long flags;
        struct res_counter *c;

        local_irq_save(flags);
        for (c = counter; c != top; c = c->parent) {
                spin_lock(&c->lock);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
        }
        local_irq_restore(flags);
}
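
/* Uncharge @val from @counter and all of its ancestors. */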
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
        res_counter_uncharge_until(counter, NULL, val);
}
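
/* Map a RES_* member id to the corresponding field of the counter. */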
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
        switch (member) {
        case RES_USAGE:
                return &counter->usage;
        case RES_MAX_USAGE:
                return &counter->max_usage;
        case RES_LIMIT:
                return &counter->limit;
        case RES_FAILCNT:
                return &counter->failcnt;
        case RES_SOFT_LIMIT:
                return &counter->soft_limit;
        }

        BUG();
        return NULL;
}
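
/*
 * Format one counter member into a user buffer, using @read_strategy if
 * the caller provides one, otherwise as a plain decimal value.
 */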
ssize_t res_counter_read(struct res_counter *counter, int member,
                         const char __user *userbuf, size_t nbytes, loff_t *pos,
                         int (*read_strategy)(unsigned long long val, char *st_buf))
{
        unsigned long long *val;
        char buf[64], *s;

        s = buf;
        val = res_counter_member(counter, member);
        if (read_strategy)
                s += read_strategy(*val, s);
        else
                s += sprintf(s, "%llu\n", *val);
        return simple_read_from_buffer((void __user *)userbuf, nbytes,
                                       pos, buf, s - buf);
}
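
/*
 * Read one member as a u64.  On 32-bit the 64-bit field cannot be read
 * atomically, so take the counter lock around the access.
 */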
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
        unsigned long flags;
        u64 ret;

        spin_lock_irqsave(&counter->lock, flags);
        ret = *res_counter_member(counter, member);
        spin_unlock_irqrestore(&counter->lock, flags);

        return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
        return *res_counter_member(counter, member);
}
#endif
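
/*
 * Parse a user-supplied limit string: "-1" means unlimited, anything else
 * is parsed with memparse() (accepting size suffixes such as K, M, G) and
 * rounded up to a page boundary.
 */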
int res_counter_memparse_write_strategy(const char *buf,
                                        unsigned long long *res)
{
        char *end;

        /* return RESOURCE_MAX (unlimited) if "-1" is specified */
        if (*buf == '-') {
                *res = simple_strtoull(buf + 1, &end, 10);
                if (*res != 1 || *end != '\0')
                        return -EINVAL;
                *res = RESOURCE_MAX;
                return 0;
        }

        *res = memparse(buf, &end);
        if (*end != '\0')
                return -EINVAL;

        *res = PAGE_ALIGN(*res);
        return 0;
}
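
/*
 * Update one counter member from a string, using @write_strategy to parse
 * it when provided; the store itself is done under the counter lock.
 */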
int res_counter_write(struct res_counter *counter, int member,
                      const char *buf, write_strategy_fn write_strategy)
{
        char *end;
        unsigned long flags;
        unsigned long long tmp, *val;

        if (write_strategy) {
                if (write_strategy(buf, &tmp))
                        return -EINVAL;
        } else {
                tmp = simple_strtoull(buf, &end, 10);
                if (*end != '\0')
                        return -EINVAL;
        }
        spin_lock_irqsave(&counter->lock, flags);
        val = res_counter_member(counter, member);
        *val = tmp;
        spin_unlock_irqrestore(&counter->lock, flags);
        return 0;
}