[cbs-scheduler.git] / kernel / res_counter.c ("Luca's patch ported")
blob 14a730913c5bd6710792e37b1f39f7678b8d0209
/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = (unsigned long long)LLONG_MAX;
	counter->parent = parent;
}
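/*
 * Illustrative sketch, not part of the original file: building a
 * two-level hierarchy with res_counter_init() so that charges against
 * the child also propagate to the parent. The names parent_rc,
 * child_rc and example_init_hierarchy() are hypothetical.
 */
static struct res_counter parent_rc, child_rc;

static void example_init_hierarchy(void)
{
	res_counter_init(&parent_rc, NULL);		/* root counter: no parent */
	res_counter_init(&child_rc, &parent_rc);	/* charges roll up to parent_rc */
}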
/* Charge @val to @counter; the caller must hold counter->lock. */
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}
/*
 * Charge @val to @counter and to every ancestor up the hierarchy. If
 * any level is over its limit, roll back the levels already charged
 * and report the failing counter through @limit_fail_at.
 */
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save_nort(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore_nort(flags);
	return ret;
}
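/*
 * Illustrative sketch, not part of the original file: the usual caller
 * pattern for res_counter_charge(). On failure *limit_fail_at names the
 * counter (the one passed in or an ancestor) whose limit was hit, so a
 * controller can pick a reclaim target. example_try_charge() and the
 * byte count passed in are hypothetical.
 */
static int example_try_charge(struct res_counter *rc, unsigned long nr_pages)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(rc, nr_pages << PAGE_SHIFT, &fail_at);
	if (ret)
		return ret;	/* fail_at is the level that refused the charge */

	/* ... use the resource, then release it on every level ... */
	res_counter_uncharge(rc, nr_pages << PAGE_SHIFT);
	return 0;
}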
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save_nort(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore_nort(flags);
}
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	}

	BUG();
	return NULL;
}
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}
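/*
 * Illustrative sketch, not part of the original file: a read_strategy
 * callback as consumed by res_counter_read() above. It formats the
 * value into the buffer it is given and returns the number of bytes
 * written. example_read_in_kb() is a hypothetical formatter.
 */
static int example_read_in_kb(unsigned long long val, char *st_buf)
{
	/* report the counter value in kilobytes instead of raw bytes */
	return sprintf(st_buf, "%llu kB\n", val >> 10);
}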
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* FIXME - make memparse() take const char* args */
	*res = memparse((char *)buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}
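/*
 * Illustrative sketch, not part of the original file: what the helper
 * above does with a human-readable size string. memparse() understands
 * K/M/G suffixes and PAGE_ALIGN() then rounds the result up to a page
 * boundary. example_parse_limits() is hypothetical; the commented
 * results assume 4K pages.
 */
static void example_parse_limits(void)
{
	unsigned long long limit;

	res_counter_memparse_write_strategy("4K", &limit);	/* limit == 4096 */
	res_counter_memparse_write_strategy("1M", &limit);	/* limit == 1048576 */
	res_counter_memparse_write_strategy("4097", &limit);	/* rounded up to 8192 */
}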
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}

	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}
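/*
 * Illustrative sketch, not part of the original file: wiring
 * res_counter_write() together with the memparse helper so that
 * "1M"-style strings from userspace are accepted for the limit.
 * example_write_limit() is hypothetical.
 */
static int example_write_limit(struct res_counter *rc, const char *userbuf)
{
	/* RES_LIMIT selects counter->limit via res_counter_member() */
	return res_counter_write(rc, RES_LIMIT, userbuf,
				 res_counter_memparse_write_strategy);
}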