/*
 * BFQ: CGROUPS support.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
 */
#ifdef CONFIG_CGROUP_BFQIO
static struct bfqio_cgroup bfqio_root_cgroup = {
        .weight = BFQ_DEFAULT_GRP_WEIGHT,
        .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
        .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
};
static inline void bfq_init_entity(struct bfq_entity *entity,
                                   struct bfq_group *bfqg)
{
        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        entity->ioprio = entity->new_ioprio;
        entity->ioprio_class = entity->new_ioprio_class;
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
}
static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
                            struct bfqio_cgroup, css);
}
/*
 * Search the hash table (for now only a list) of @bgrp for the bfq_group
 * associated with @bfqd.  Must be called under rcu_read_lock().
 */
static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
                                            struct bfq_data *bfqd)
{
        struct bfq_group *bfqg;
        struct hlist_node *n;
        void *key;

        hlist_for_each_entry_rcu(bfqg, n, &bgrp->group_data, group_node) {
                key = rcu_dereference(bfqg->bfqd);
                if (key == bfqd)
                        return bfqg;
        }

        return NULL;
}
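/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller of the lookup above must hold rcu_read_lock() across both the
 * call and any use of the returned group, e.g.:
 *
 *      struct bfq_group *bfqg;
 *
 *      rcu_read_lock();
 *      bfqg = bfqio_lookup_group(bgrp, bfqd);
 *      if (bfqg != NULL)
 *              use(bfqg);      (use() is a placeholder)
 *      rcu_read_unlock();
 */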
static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
                                         struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqg->entity;

        entity->weight = entity->new_weight = bgrp->weight;
        entity->orig_weight = entity->new_weight;
        entity->ioprio = entity->new_ioprio = bgrp->ioprio;
        entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
        entity->ioprio_changed = 1;
        entity->my_sched_data = &bfqg->sched_data;
}
static inline void bfq_group_set_parent(struct bfq_group *bfqg,
                                        struct bfq_group *parent)
{
        struct bfq_entity *entity;

        BUG_ON(parent == NULL);
        BUG_ON(bfqg == NULL);

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}
/**
 * bfq_group_chain_alloc - allocate a chain of groups.
 * @bfqd: queue descriptor.
 * @cgroup: the leaf cgroup this chain starts from.
 *
 * Allocate a chain of groups starting from the one belonging to
 * @cgroup up to the root cgroup.  Stop if a cgroup on the chain
 * to the root already has an allocated group on @bfqd.
 */
static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
                                               struct cgroup *cgroup)
{
        struct bfqio_cgroup *bgrp;
        struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;

        for (; cgroup != NULL; cgroup = cgroup->parent) {
                bgrp = cgroup_to_bfqio(cgroup);

                bfqg = bfqio_lookup_group(bgrp, bfqd);
                if (bfqg != NULL) {
                        /*
                         * All the cgroups in the path from there to the
                         * root must have a bfq_group for bfqd, so we don't
                         * need any more allocations.
                         */
                        break;
                }

                bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
                if (bfqg == NULL)
                        goto cleanup;

                bfq_group_init_entity(bgrp, bfqg);
                bfqg->my_entity = &bfqg->entity;

                if (leaf == NULL) {
                        leaf = bfqg;
                        prev = leaf;
                } else {
                        bfq_group_set_parent(prev, bfqg);
                        /*
                         * Build a list of allocated nodes using the bfqd
                         * field, which is still unused and will be
                         * initialized only after the node is connected.
                         */
                        prev->bfqd = bfqg;
                        prev = bfqg;
                }
        }

        return leaf;

cleanup:
        while (leaf != NULL) {
                prev = leaf;
                leaf = leaf->bfqd;
                kfree(prev);
        }

        return NULL;
}
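/*
 * Illustrative sketch, not part of the original file: for a hierarchy
 * root <- A <- B where no cgroup has a group on @bfqd yet, the chain
 * built above for leaf cgroup B reuses the (still unused) ->bfqd field
 * as the list link:
 *
 *      leaf(B) --bfqd--> grp(A) --bfqd--> grp(root) --bfqd--> NULL
 *
 * bfq_group_chain_link() later walks this list, overwriting each ->bfqd
 * with the real device pointer as it connects the nodes.
 */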
/**
 * bfq_group_chain_link - link an allocated group chain to a cgroup hierarchy.
 * @bfqd: the queue descriptor.
 * @cgroup: the leaf cgroup to start from.
 * @leaf: the leaf group (to be associated to @cgroup).
 *
 * Try to link a chain of groups to a cgroup hierarchy, connecting the
 * nodes bottom-up, so we can be sure that when we find a cgroup in the
 * hierarchy that already has a group associated to @bfqd all the nodes
 * in the path to the root cgroup have one too.
 *
 * On locking: the queue lock protects the hierarchy (there is a hierarchy
 * per device) while the bfqio_cgroup lock protects the list of groups
 * belonging to the same cgroup.
 */
static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
                                 struct bfq_group *leaf)
{
        struct bfqio_cgroup *bgrp;
        struct bfq_group *bfqg, *next, *prev = NULL;
        unsigned long flags;

        assert_spin_locked(bfqd->queue->queue_lock);

        for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) {
                bgrp = cgroup_to_bfqio(cgroup);
                next = leaf->bfqd;

                bfqg = bfqio_lookup_group(bgrp, bfqd);
                BUG_ON(bfqg != NULL);

                spin_lock_irqsave(&bgrp->lock, flags);

                rcu_assign_pointer(leaf->bfqd, bfqd);
                hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
                hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);

                spin_unlock_irqrestore(&bgrp->lock, flags);

                prev = leaf;
                leaf = next;
        }

        BUG_ON(cgroup == NULL && leaf != NULL);
        if (cgroup != NULL && prev != NULL) {
                bgrp = cgroup_to_bfqio(cgroup);
                bfqg = bfqio_lookup_group(bgrp, bfqd);
                bfq_group_set_parent(prev, bfqg);
        }
}
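/*
 * Illustrative sketch, not part of the original file: the lock nesting
 * used by the linking step above is
 *
 *      spin_lock_irq(bfqd->queue->queue_lock);          per-device hierarchy
 *              spin_lock_irqsave(&bgrp->lock, flags);   per-cgroup group list
 *              spin_unlock_irqrestore(&bgrp->lock, flags);
 *      spin_unlock_irq(bfqd->queue->queue_lock);
 *
 * i.e. the queue lock is always the outer lock.
 */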
/**
 * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
 * @bfqd: queue descriptor.
 * @cgroup: cgroup being searched for.
 *
 * Return a group associated to @bfqd in @cgroup, allocating one if
 * necessary.  When a group is returned all the cgroups in the path
 * to the root have a group associated to @bfqd.
 *
 * If the allocation fails, return the root group: this breaks guarantees
 * but is a safe fallback.  If this loss becomes a problem it can be
 * mitigated using the equivalent weight (given by the product of the
 * weights of the groups in the path from @cgroup to the root) in the
 * root scheduler.
 *
 * We allocate all the missing nodes in the path from the leaf cgroup
 * to the root and we connect the nodes only after all the allocations
 * have been successful.
 */
static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
                                              struct cgroup *cgroup)
{
        struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
        struct bfq_group *bfqg;

        bfqg = bfqio_lookup_group(bgrp, bfqd);
        if (bfqg != NULL)
                return bfqg;

        bfqg = bfq_group_chain_alloc(bfqd, cgroup);
        if (bfqg != NULL)
                bfq_group_chain_link(bfqd, cgroup, bfqg);
        else
                bfqg = bfqd->root_group;

        return bfqg;
}
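/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller, already holding the queue lock, would wrap the cgroup lookup
 * in an RCU read-side critical section:
 *
 *      rcu_read_lock();
 *      cgroup = task_cgroup(current, bfqio_subsys_id);
 *      bfqg = bfq_find_alloc_group(bfqd, cgroup);
 *      rcu_read_unlock();
 *
 * Thanks to the root-group fallback, bfqg is never NULL here.
 */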
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @entity: @bfqq's entity.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the queue lock; the cgroup owning @bfqg must
 * not disappear (for now this just means that we are called under
 * rcu_read_lock()).
 */
static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                          struct bfq_entity *entity, struct bfq_group *bfqg)
{
        int busy, resume;

        busy = bfq_bfqq_busy(bfqq);
        resume = !RB_EMPTY_ROOT(&bfqq->sort_list);

        BUG_ON(resume && !entity->on_st);
        BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue);

        if (busy) {
                BUG_ON(atomic_read(&bfqq->ref) < 2);

                if (!resume)
                        bfq_del_bfqq_busy(bfqd, bfqq, 0);
                else
                        bfq_deactivate_bfqq(bfqd, bfqq, 0);
        } else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);

        /*
         * Here we use a reference to bfqg.  We don't need a refcounter
         * as the cgroup reference will not be dropped, so that its
         * destroy() callback will not be invoked.
         */
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;

        if (busy && resume)
                bfq_activate_bfqq(bfqd, bfqq);
}
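/*
 * Illustrative summary, not part of the original file, of the cases
 * handled above:
 *
 *      busy resume     action taken before re-parenting the entity
 *        1    1        bfq_deactivate_bfqq() (still has queued requests)
 *        1    0        bfq_del_bfqq_busy()   (busy but empty)
 *        0    -        bfq_put_idle_entity() if the entity sits on an
 *                      idle tree
 *
 * Only a busy queue with pending requests (busy && resume) is
 * re-activated on the new group.
 */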
/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @cgroup: the cgroup to move to.
 *
 * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
 * has to make sure that the reference to cgroup is valid across the call.
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and get a reference to it, reducing the lookup time
 * here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct cgroup *cgroup)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_entity *entity;
        struct bfq_group *bfqg;
        struct bfqio_cgroup *bgrp;

        bgrp = cgroup_to_bfqio(cgroup);

        bfqg = bfq_find_alloc_group(bfqd, cgroup);
        if (async_bfqq != NULL) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq,
                                     atomic_read(&async_bfqq->ref));
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq != NULL) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
        }

        return bfqg;
}
/**
 * bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bic: the bic being migrated.
 * @cgroup: the destination cgroup.
 *
 * When the task owning @bic is moved to @cgroup, @bic is immediately
 * moved into its new parent group.
 */
static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
                                  struct cgroup *cgroup)
{
        struct bfq_data *bfqd;
        unsigned long uninitialized_var(flags);

        bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
                                   &flags);
        if (bfqd != NULL) {
                __bfq_bic_change_cgroup(bfqd, bic, cgroup);
                bfq_put_bfqd_unlock(bfqd, &flags);
        }
}
/**
 * bfq_bic_update_cgroup - update the cgroup of @bic.
 * @bic: the @bic to update.
 *
 * Make sure that @bic is enqueued in the cgroup of the current task.
 * We need this in addition to moving bics during the cgroup attach
 * phase because the task owning @bic could be at its first disk
 * access or we may end up in the root cgroup as the result of a
 * memory allocation failure and here we try to move to the right
 * group.
 *
 * Must be called under the queue lock.  It is safe to use the returned
 * value even after the rcu_read_unlock() as the migration/destruction
 * paths act under the queue lock too.  IOW it is impossible to race with
 * group migration/destruction and end up with an invalid group as:
 *   a) here cgroup has not yet been destroyed, nor its destroy() callback
 *      has started execution, as current holds a reference to it,
 *   b) if it is destroyed after rcu_read_unlock() [after current is
 *      migrated to a different cgroup] its attach() callback will have
 *      taken care of removing all the references to the old cgroup data.
 */
static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg;
        struct cgroup *cgroup;

        BUG_ON(bfqd == NULL);

        rcu_read_lock();
        cgroup = task_cgroup(current, bfqio_subsys_id);
        bfqg = __bfq_bic_change_cgroup(bfqd, bic, cgroup);
        rcu_read_unlock();

        return bfqg;
}
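/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * request-setup path would refresh the group right before picking a
 * queue, roughly:
 *
 *      spin_lock_irq(q->queue_lock);
 *      bfqg = bfq_bic_update_cgroup(bic);
 *      bfqq = bfq_get_queue(bfqd, bfqg, is_sync, ...);
 *      spin_unlock_irq(q->queue_lock);
 *
 * bfq_get_queue() and its argument list are assumptions here; the point
 * is only that the returned bfqg stays valid while the queue lock is
 * held, as documented above.
 */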
/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity != NULL; entity = st->first_idle)
                __bfq_deactivate_entity(entity, 0);
}
/**
 * bfq_destroy_group - destroy @bfqg.
 * @bgrp: the bfqio_cgroup containing @bfqg.
 * @bfqg: the group being destroyed.
 *
 * Destroy @bfqg, making sure that it is not referenced from its parent.
 */
static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
{
        struct bfq_data *bfqd;
        struct bfq_service_tree *st;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long uninitialized_var(flags);
        int i;

        hlist_del(&bfqg->group_node);

        /*
         * We may race with device destruction, take extra care when
         * dereferencing bfqg->bfqd.
         */
        bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
        if (bfqd != NULL) {
                hlist_del(&bfqg->bfqd_node);
                __bfq_deactivate_entity(entity, 0);
                bfq_put_async_queues(bfqd, bfqg);
                bfq_put_bfqd_unlock(bfqd, &flags);
        }

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks because they never migrated to a
                 * different cgroup from the one being destroyed now.
                 * No one else can access them, so it's safe to act
                 * without any lock.
                 */
                bfq_flush_idle_tree(st);

                BUG_ON(!RB_EMPTY_ROOT(&st->active));
                BUG_ON(!RB_EMPTY_ROOT(&st->idle));
        }
        BUG_ON(bfqg->sched_data.next_active != NULL);
        BUG_ON(bfqg->sched_data.active_entity != NULL);
        BUG_ON(entity->tree != NULL);

        /*
         * No need to defer the kfree() to the end of the RCU grace
         * period: we are called from the destroy() callback of our
         * cgroup, so we can be sure that no one is a) still using
         * this cgroup or b) doing lookups in it.
         */
        kfree(bfqg);
}
/**
 * bfq_disconnect_groups - disconnect @bfqd from all its groups.
 * @bfqd: the device descriptor being exited.
 *
 * When the device exits we just make sure that no lookup can return
 * the now unused group structures.  They will be deallocated on cgroup
 * destruction.
 */
static void bfq_disconnect_groups(struct bfq_data *bfqd)
{
        struct hlist_node *pos, *n;
        struct bfq_group *bfqg;

        bfq_log(bfqd, "disconnect_groups beginning");
        hlist_for_each_entry_safe(bfqg, pos, n, &bfqd->group_list, bfqd_node) {
                hlist_del(&bfqg->bfqd_node);

                __bfq_deactivate_entity(bfqg->my_entity, 0);

                /*
                 * Don't remove from the group hash, just set an
                 * invalid key.  No lookups can race with the
                 * assignment as bfqd is being destroyed; this
                 * implies also that new elements cannot be added
                 * to the list.
                 */
                rcu_assign_pointer(bfqg->bfqd, NULL);

                bfq_log(bfqd, "disconnect_groups: put async for group %p",
                        bfqg);
                bfq_put_async_queues(bfqd, bfqg);
        }
}
static inline void bfq_free_root_group(struct bfq_data *bfqd)
{
        struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
        struct bfq_group *bfqg = bfqd->root_group;

        bfq_put_async_queues(bfqd, bfqg);

        spin_lock_irq(&bgrp->lock);
        hlist_del_rcu(&bfqg->group_node);
        spin_unlock_irq(&bgrp->lock);

        /*
         * No need to synchronize_rcu() here: since the device is gone
         * there cannot be any read-side access to its root_group.
         */
        kfree(bfqg);
}
static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        struct bfqio_cgroup *bgrp;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (bfqg == NULL)
                return NULL;

        bfqg->entity.parent = NULL;
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        bgrp = &bfqio_root_cgroup;
        spin_lock_irq(&bgrp->lock);
        rcu_assign_pointer(bfqg->bfqd, bfqd);
        hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
        spin_unlock_irq(&bgrp->lock);

        return bfqg;
}
#define SHOW_FUNCTION(__VAR)                                            \
static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup,           \
                                       struct cftype *cftype)           \
{                                                                       \
        struct bfqio_cgroup *bgrp;                                      \
        u64 ret;                                                        \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        bgrp = cgroup_to_bfqio(cgroup);                                 \
        spin_lock_irq(&bgrp->lock);                                     \
        ret = bgrp->__VAR;                                              \
        spin_unlock_irq(&bgrp->lock);                                   \
                                                                        \
        cgroup_unlock();                                                \
                                                                        \
        return ret;                                                     \
}

SHOW_FUNCTION(weight);
SHOW_FUNCTION(ioprio);
SHOW_FUNCTION(ioprio_class);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__VAR, __MIN, __MAX)                             \
static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup,          \
                                        struct cftype *cftype,          \
                                        u64 val)                        \
{                                                                       \
        struct bfqio_cgroup *bgrp;                                      \
        struct bfq_group *bfqg;                                         \
        struct hlist_node *n;                                           \
                                                                        \
        if (val < (__MIN) || val > (__MAX))                             \
                return -EINVAL;                                         \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        bgrp = cgroup_to_bfqio(cgroup);                                 \
                                                                        \
        spin_lock_irq(&bgrp->lock);                                     \
        bgrp->__VAR = (unsigned short)val;                              \
        hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) {  \
                bfqg->entity.new_##__VAR = (unsigned short)val;         \
                smp_wmb();                                              \
                bfqg->entity.ioprio_changed = 1;                        \
        }                                                               \
        spin_unlock_irq(&bgrp->lock);                                   \
                                                                        \
        cgroup_unlock();                                                \
                                                                        \
        return 0;                                                       \
}

STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
#undef STORE_FUNCTION
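/*
 * Illustrative sketch, not part of the original file: the invocation
 * STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT) above expands
 * (eliding the locking and validation) to roughly
 *
 *      static int bfqio_cgroup_weight_write(struct cgroup *cgroup,
 *                                           struct cftype *cftype, u64 val)
 *      {
 *              ...
 *              bgrp->weight = (unsigned short)val;
 *              hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) {
 *                      bfqg->entity.new_weight = (unsigned short)val;
 *                      smp_wmb();
 *                      bfqg->entity.ioprio_changed = 1;
 *              }
 *              ...
 *      }
 *
 * i.e. the new value is stored in the cgroup and propagated lazily to
 * each group through the new_weight/ioprio_changed pair.
 */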
static struct cftype bfqio_files[] = {
        {
                .name = "weight",
                .read_u64 = bfqio_cgroup_weight_read,
                .write_u64 = bfqio_cgroup_weight_write,
        },
        {
                .name = "ioprio",
                .read_u64 = bfqio_cgroup_ioprio_read,
                .write_u64 = bfqio_cgroup_ioprio_write,
        },
        {
                .name = "ioprio_class",
                .read_u64 = bfqio_cgroup_ioprio_class_read,
                .write_u64 = bfqio_cgroup_ioprio_class_write,
        },
};
static int bfqio_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, bfqio_files,
                                ARRAY_SIZE(bfqio_files));
}
static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys *subsys,
                                                struct cgroup *cgroup)
{
        struct bfqio_cgroup *bgrp;

        if (cgroup->parent != NULL) {
                bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
                if (bgrp == NULL)
                        return ERR_PTR(-ENOMEM);
        } else
                bgrp = &bfqio_root_cgroup;

        spin_lock_init(&bgrp->lock);
        INIT_HLIST_HEAD(&bgrp->group_data);
        bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
        bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;

        return &bgrp->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main bic/bfqq data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc; the drawback of this
 * behavior is that a group containing a task that forked using CLONE_IO
 * will not be destroyed until the tasks sharing the ioc die.
 */
static int bfqio_can_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                            struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        cgroup_taskset_for_each(task, cgroup, tset) {
                /* task_lock() is needed to avoid races with exit_io_context() */
                task_lock(task);
                ioc = task->io_context;
                if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
                        /*
                         * ioc == NULL means that the task is either too
                         * young or exiting: if it still has no ioc, the
                         * ioc can't be shared; if the task is exiting,
                         * the attach will fail anyway, no matter what we
                         * return here.
                         */
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }

        return ret;
}
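/*
 * Illustrative example, not part of the original file: after a
 * hypothetical
 *
 *      clone(child_fn, child_stack, CLONE_IO | SIGCHLD, NULL);
 *
 * parent and child share a single io_context with nr_tasks == 2, so the
 * check above rejects (-EINVAL) moving either task to another bfqio
 * cgroup until one of them exits.
 */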
static void bfqio_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                         struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        struct io_cq *icq;
        struct hlist_node *n;

        /*
         * IMPORTANT NOTE: The move of more than one process at a time to a
         * new group has not yet been tested.
         */
        cgroup_taskset_for_each(task, cgroup, tset) {
                ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
                if (ioc) {
                        /*
                         * Handle cgroup change here.
                         */
                        rcu_read_lock();
                        hlist_for_each_entry_rcu(icq, n, &ioc->icq_list,
                                                 ioc_node)
                                bfq_bic_change_cgroup(icq_to_bic(icq),
                                                      cgroup);
                        rcu_read_unlock();
                        put_io_context(ioc);
                }
        }
}
static void bfqio_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
        struct hlist_node *n, *tmp;
        struct bfq_group *bfqg;

        /*
         * Since we are destroying the cgroup, there are no more tasks
         * referencing it, and all the RCU grace periods that may have
         * referenced it are ended (as the destruction of the parent
         * cgroup is RCU-safe); bgrp->group_data will not be accessed by
         * anything else and we don't need any synchronization.
         */
        hlist_for_each_entry_safe(bfqg, n, tmp, &bgrp->group_data, group_node)
                bfq_destroy_group(bgrp, bfqg);

        BUG_ON(!hlist_empty(&bgrp->group_data));

        kfree(bgrp);
}
struct cgroup_subsys bfqio_subsys = {
        .name = "bfqio",
        .create = bfqio_create,
        .can_attach = bfqio_can_attach,
        .attach = bfqio_attach,
        .destroy = bfqio_destroy,
        .populate = bfqio_populate,
        .subsys_id = bfqio_subsys_id,
};
#else /* CONFIG_CGROUP_BFQIO */
static inline void bfq_init_entity(struct bfq_entity *entity,
                                   struct bfq_group *bfqg)
{
        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        entity->ioprio = entity->new_ioprio;
        entity->ioprio_class = entity->new_ioprio_class;
        entity->sched_data = &bfqg->sched_data;
}
static inline struct bfq_group *
bfq_bic_update_cgroup(struct bfq_io_cq *bic)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        return bfqd->root_group;
}
static inline void bfq_bfqq_move(struct bfq_data *bfqd,
                                 struct bfq_queue *bfqq,
                                 struct bfq_entity *entity,
                                 struct bfq_group *bfqg)
{
}
static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
{
        bfq_put_async_queues(bfqd, bfqd->root_group);
}
static inline void bfq_free_root_group(struct bfq_data *bfqd)
{
        kfree(bfqd->root_group);
}
static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (bfqg == NULL)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}
#endif /* CONFIG_CGROUP_BFQIO */