// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"
14 struct blk_queue_stats
{
15 struct list_head callbacks
;
17 bool enable_accounting
;
20 void blk_rq_stat_init(struct blk_rq_stat
*stat
)
23 stat
->max
= stat
->nr_samples
= stat
->mean
= 0;
27 /* src is a per-cpu stat, mean isn't initialized */
28 void blk_rq_stat_sum(struct blk_rq_stat
*dst
, struct blk_rq_stat
*src
)
33 dst
->min
= min(dst
->min
, src
->min
);
34 dst
->max
= max(dst
->max
, src
->max
);
36 dst
->mean
= div_u64(src
->batch
+ dst
->mean
* dst
->nr_samples
,
37 dst
->nr_samples
+ src
->nr_samples
);
39 dst
->nr_samples
+= src
->nr_samples
;
42 void blk_rq_stat_add(struct blk_rq_stat
*stat
, u64 value
)
44 stat
->min
= min(stat
->min
, value
);
45 stat
->max
= max(stat
->max
, value
);
50 void blk_stat_add(struct request
*rq
, u64 now
)
52 struct request_queue
*q
= rq
->q
;
53 struct blk_stat_callback
*cb
;
54 struct blk_rq_stat
*stat
;
58 value
= (now
>= rq
->io_start_time_ns
) ? now
- rq
->io_start_time_ns
: 0;
60 blk_throtl_stat_add(rq
, value
);
63 list_for_each_entry_rcu(cb
, &q
->stats
->callbacks
, list
) {
64 if (!blk_stat_is_active(cb
))
67 bucket
= cb
->bucket_fn(rq
);
71 stat
= &get_cpu_ptr(cb
->cpu_stat
)[bucket
];
72 blk_rq_stat_add(stat
, value
);
73 put_cpu_ptr(cb
->cpu_stat
);
78 static void blk_stat_timer_fn(struct timer_list
*t
)
80 struct blk_stat_callback
*cb
= from_timer(cb
, t
, timer
);
84 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
85 blk_rq_stat_init(&cb
->stat
[bucket
]);
87 for_each_online_cpu(cpu
) {
88 struct blk_rq_stat
*cpu_stat
;
90 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
91 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++) {
92 blk_rq_stat_sum(&cb
->stat
[bucket
], &cpu_stat
[bucket
]);
93 blk_rq_stat_init(&cpu_stat
[bucket
]);
100 struct blk_stat_callback
*
101 blk_stat_alloc_callback(void (*timer_fn
)(struct blk_stat_callback
*),
102 int (*bucket_fn
)(const struct request
*),
103 unsigned int buckets
, void *data
)
105 struct blk_stat_callback
*cb
;
107 cb
= kmalloc(sizeof(*cb
), GFP_KERNEL
);
111 cb
->stat
= kmalloc_array(buckets
, sizeof(struct blk_rq_stat
),
117 cb
->cpu_stat
= __alloc_percpu(buckets
* sizeof(struct blk_rq_stat
),
118 __alignof__(struct blk_rq_stat
));
125 cb
->timer_fn
= timer_fn
;
126 cb
->bucket_fn
= bucket_fn
;
128 cb
->buckets
= buckets
;
129 timer_setup(&cb
->timer
, blk_stat_timer_fn
, 0);
133 EXPORT_SYMBOL_GPL(blk_stat_alloc_callback
);
135 void blk_stat_add_callback(struct request_queue
*q
,
136 struct blk_stat_callback
*cb
)
141 for_each_possible_cpu(cpu
) {
142 struct blk_rq_stat
*cpu_stat
;
144 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
145 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
146 blk_rq_stat_init(&cpu_stat
[bucket
]);
149 spin_lock(&q
->stats
->lock
);
150 list_add_tail_rcu(&cb
->list
, &q
->stats
->callbacks
);
151 blk_queue_flag_set(QUEUE_FLAG_STATS
, q
);
152 spin_unlock(&q
->stats
->lock
);
154 EXPORT_SYMBOL_GPL(blk_stat_add_callback
);
156 void blk_stat_remove_callback(struct request_queue
*q
,
157 struct blk_stat_callback
*cb
)
159 spin_lock(&q
->stats
->lock
);
160 list_del_rcu(&cb
->list
);
161 if (list_empty(&q
->stats
->callbacks
) && !q
->stats
->enable_accounting
)
162 blk_queue_flag_clear(QUEUE_FLAG_STATS
, q
);
163 spin_unlock(&q
->stats
->lock
);
165 del_timer_sync(&cb
->timer
);
167 EXPORT_SYMBOL_GPL(blk_stat_remove_callback
);
169 static void blk_stat_free_callback_rcu(struct rcu_head
*head
)
171 struct blk_stat_callback
*cb
;
173 cb
= container_of(head
, struct blk_stat_callback
, rcu
);
174 free_percpu(cb
->cpu_stat
);
179 void blk_stat_free_callback(struct blk_stat_callback
*cb
)
182 call_rcu(&cb
->rcu
, blk_stat_free_callback_rcu
);
184 EXPORT_SYMBOL_GPL(blk_stat_free_callback
);
186 void blk_stat_enable_accounting(struct request_queue
*q
)
188 spin_lock(&q
->stats
->lock
);
189 q
->stats
->enable_accounting
= true;
190 blk_queue_flag_set(QUEUE_FLAG_STATS
, q
);
191 spin_unlock(&q
->stats
->lock
);
193 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting
);
195 struct blk_queue_stats
*blk_alloc_queue_stats(void)
197 struct blk_queue_stats
*stats
;
199 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
203 INIT_LIST_HEAD(&stats
->callbacks
);
204 spin_lock_init(&stats
->lock
);
205 stats
->enable_accounting
= false;
210 void blk_free_queue_stats(struct blk_queue_stats
*stats
)
215 WARN_ON(!list_empty(&stats
->callbacks
));