/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
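/*
 * Example (illustrative only): the proportional-weight "weight_device" file
 * is created further below with cft->private set to
 * BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, BLKIO_PROP_weight_device), i.e. the
 * owning policy id in the upper 16 bits and the per-policy file id in the
 * lower 16 bits; BLKIOFILE_POLICY() and BLKIOFILE_ATTR() recover the halves.
 */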
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
		enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid
		    && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
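/*
 * Example (illustrative): a caller that only needs a short-lived reference
 * can resolve the blkio cgroup of the current task under rcu_read_lock(),
 * assuming it does not keep the pointer past the critical section:
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	... use blkcg ...
 *	rcu_read_unlock();
 */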
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
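/*
 * Example (illustrative): the value reported through the debug-only
 * avg_queue_size file is derived later, in blkio_get_stat(), as
 * avg_queue_size_sum / avg_queue_size_samples, so each call above adds one
 * sample of "requests currently queued" to the running sum.
 */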
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_per_cpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
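/*
 * Example (illustrative sketch only): a policy that creates a new group is
 * expected to set up the per-cpu stats from sleepable context and only then
 * link the group into the cgroup; names other than the two exported helpers
 * above are hypothetical:
 *
 *	if (blkio_alloc_blkg_stats(blkg))
 *		goto err;
 *	blkiocg_add_blkio_group(blkcg, blkg, my_queue_key, my_dev,
 *				BLKIO_POLICY_PROP);
 */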
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
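/*
 * Example (illustrative): from userspace the reset above is triggered by
 * writing any value to the per-cgroup reset file registered further below,
 * e.g. with the controller mounted at /sys/fs/cgroup/blkio:
 *
 *	echo 1 > /sys/fs/cgroup/blkio/<grp>/blkio.reset_stats
 */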
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
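/*
 * Example (illustrative): for dev 8:16 the keys produced above look like
 * "8:16 Read", "8:16 Write", "8:16 Sync", "8:16 Async" and "8:16 Total",
 * or just "8:16" when diskname_only is true.
 */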
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
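/*
 * Example (illustrative): the loop above is the read side of the
 * u64_stats_sync pattern; it pairs with the writers in
 * blkiocg_update_dispatch_stats()/blkiocg_update_io_merged_stats(), which
 * bracket their updates with u64_stats_update_begin()/end() so that 32 bit
 * readers retry instead of observing a torn 64 bit value.
 */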
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}
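/*
 * Example (illustrative): each of the files backed by this parser takes a
 * single "major:minor value" rule per write, and a value of 0 removes the
 * rule (see blkio_delete_rule_command() below), e.g.:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	echo "8:16 0"       > blkio.throttle.read_bps_device	(delete rule)
 *	echo "8:16 300"     > blkio.weight_device
 */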
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;

	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch(oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch(newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}
1129 static void blkio_read_policy_node_files(struct cftype
*cft
,
1130 struct blkio_cgroup
*blkcg
, struct seq_file
*m
)
1132 struct blkio_policy_node
*pn
;
1134 if (!list_empty(&blkcg
->policy_list
)) {
1135 spin_lock_irq(&blkcg
->lock
);
1136 list_for_each_entry(pn
, &blkcg
->policy_list
, node
) {
1137 if (!pn_matches_cftype(cft
, pn
))
1139 blkio_print_policy_node(m
, pn
);
1141 spin_unlock_irq(&blkcg
->lock
);
1145 static int blkiocg_file_read(struct cgroup
*cgrp
, struct cftype
*cft
,
1148 struct blkio_cgroup
*blkcg
;
1149 enum blkio_policy_id plid
= BLKIOFILE_POLICY(cft
->private);
1150 int name
= BLKIOFILE_ATTR(cft
->private);
1152 blkcg
= cgroup_to_blkio_cgroup(cgrp
);
1155 case BLKIO_POLICY_PROP
:
1157 case BLKIO_PROP_weight_device
:
1158 blkio_read_policy_node_files(cft
, blkcg
, m
);
1164 case BLKIO_POLICY_THROTL
:
1166 case BLKIO_THROTL_read_bps_device
:
1167 case BLKIO_THROTL_write_bps_device
:
1168 case BLKIO_THROTL_read_iops_device
:
1169 case BLKIO_THROTL_write_iops_device
:
1170 blkio_read_policy_node_files(cft
, blkcg
, m
);
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
[] = {
1364 .name
= "weight_device",
1365 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1366 BLKIO_PROP_weight_device
),
1367 .read_seq_string
= blkiocg_file_read
,
1368 .write_string
= blkiocg_file_write
,
1369 .max_write_len
= 256,
1373 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1375 .read_u64
= blkiocg_file_read_u64
,
1376 .write_u64
= blkiocg_file_write_u64
,
1380 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1382 .read_map
= blkiocg_file_read_map
,
1386 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1387 BLKIO_PROP_sectors
),
1388 .read_map
= blkiocg_file_read_map
,
1391 .name
= "io_service_bytes",
1392 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1393 BLKIO_PROP_io_service_bytes
),
1394 .read_map
= blkiocg_file_read_map
,
1397 .name
= "io_serviced",
1398 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1399 BLKIO_PROP_io_serviced
),
1400 .read_map
= blkiocg_file_read_map
,
1403 .name
= "io_service_time",
1404 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1405 BLKIO_PROP_io_service_time
),
1406 .read_map
= blkiocg_file_read_map
,
1409 .name
= "io_wait_time",
1410 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1411 BLKIO_PROP_io_wait_time
),
1412 .read_map
= blkiocg_file_read_map
,
1415 .name
= "io_merged",
1416 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1417 BLKIO_PROP_io_merged
),
1418 .read_map
= blkiocg_file_read_map
,
1421 .name
= "io_queued",
1422 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1423 BLKIO_PROP_io_queued
),
1424 .read_map
= blkiocg_file_read_map
,
1427 .name
= "reset_stats",
1428 .write_u64
= blkiocg_reset_stats
,
1430 #ifdef CONFIG_BLK_DEV_THROTTLING
1432 .name
= "throttle.read_bps_device",
1433 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1434 BLKIO_THROTL_read_bps_device
),
1435 .read_seq_string
= blkiocg_file_read
,
1436 .write_string
= blkiocg_file_write
,
1437 .max_write_len
= 256,
1441 .name
= "throttle.write_bps_device",
1442 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1443 BLKIO_THROTL_write_bps_device
),
1444 .read_seq_string
= blkiocg_file_read
,
1445 .write_string
= blkiocg_file_write
,
1446 .max_write_len
= 256,
1450 .name
= "throttle.read_iops_device",
1451 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1452 BLKIO_THROTL_read_iops_device
),
1453 .read_seq_string
= blkiocg_file_read
,
1454 .write_string
= blkiocg_file_write
,
1455 .max_write_len
= 256,
1459 .name
= "throttle.write_iops_device",
1460 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1461 BLKIO_THROTL_write_iops_device
),
1462 .read_seq_string
= blkiocg_file_read
,
1463 .write_string
= blkiocg_file_write
,
1464 .max_write_len
= 256,
1467 .name
= "throttle.io_service_bytes",
1468 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1469 BLKIO_THROTL_io_service_bytes
),
1470 .read_map
= blkiocg_file_read_map
,
1473 .name
= "throttle.io_serviced",
1474 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL
,
1475 BLKIO_THROTL_io_serviced
),
1476 .read_map
= blkiocg_file_read_map
,
1478 #endif /* CONFIG_BLK_DEV_THROTTLING */
1480 #ifdef CONFIG_DEBUG_BLK_CGROUP
1482 .name
= "avg_queue_size",
1483 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1484 BLKIO_PROP_avg_queue_size
),
1485 .read_map
= blkiocg_file_read_map
,
1488 .name
= "group_wait_time",
1489 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1490 BLKIO_PROP_group_wait_time
),
1491 .read_map
= blkiocg_file_read_map
,
1494 .name
= "idle_time",
1495 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1496 BLKIO_PROP_idle_time
),
1497 .read_map
= blkiocg_file_read_map
,
1500 .name
= "empty_time",
1501 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1502 BLKIO_PROP_empty_time
),
1503 .read_map
= blkiocg_file_read_map
,
1507 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1508 BLKIO_PROP_dequeue
),
1509 .read_map
= blkiocg_file_read_map
,
1512 .name
= "unaccounted_time",
1513 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP
,
1514 BLKIO_PROP_unaccounted_time
),
1515 .read_map
= blkiocg_file_read_map
,
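/*
 * Example (illustrative): with the controller mounted, each entry above
 * shows up as a per-cgroup file prefixed with the subsystem name, e.g.
 * blkio.weight, blkio.weight_device, blkio.throttle.read_bps_device and,
 * with CONFIG_DEBUG_BLK_CGROUP, blkio.avg_queue_size.
 */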
static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
*blkiocg_create(struct cgroup
*cgroup
)
1573 struct blkio_cgroup
*blkcg
;
1574 struct cgroup
*parent
= cgroup
->parent
;
1577 blkcg
= &blkio_root_cgroup
;
1581 blkcg
= kzalloc(sizeof(*blkcg
), GFP_KERNEL
);
1583 return ERR_PTR(-ENOMEM
);
1585 blkcg
->weight
= BLKIO_WEIGHT_DEFAULT
;
1587 spin_lock_init(&blkcg
->lock
);
1588 INIT_HLIST_HEAD(&blkcg
->blkg_list
);
1590 INIT_LIST_HEAD(&blkcg
->policy_list
);
/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.base_cftypes = blkio_files,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
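/*
 * Example (illustrative sketch, not taken from any in-tree policy): a policy
 * module fills in a blkio_policy_type and registers it so that weight/bps/
 * iops updates and group unlink events reach its callbacks; the example_*
 * names are hypothetical, the fields and the registration call are the ones
 * used in this file.
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops = {
 *			.blkio_unlink_group_fn = example_unlink_group,
 *			.blkio_update_group_weight_fn = example_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);
 */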
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");