// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"
14 struct blk_queue_stats
{
15 struct list_head callbacks
;
20 void blk_rq_stat_init(struct blk_rq_stat
*stat
)
23 stat
->max
= stat
->nr_samples
= stat
->mean
= 0;
27 /* src is a per-cpu stat, mean isn't initialized */
28 void blk_rq_stat_sum(struct blk_rq_stat
*dst
, struct blk_rq_stat
*src
)
30 if (dst
->nr_samples
+ src
->nr_samples
<= dst
->nr_samples
)
33 dst
->min
= min(dst
->min
, src
->min
);
34 dst
->max
= max(dst
->max
, src
->max
);
36 dst
->mean
= div_u64(src
->batch
+ dst
->mean
* dst
->nr_samples
,
37 dst
->nr_samples
+ src
->nr_samples
);
39 dst
->nr_samples
+= src
->nr_samples
;
42 void blk_rq_stat_add(struct blk_rq_stat
*stat
, u64 value
)
44 stat
->min
= min(stat
->min
, value
);
45 stat
->max
= max(stat
->max
, value
);
50 void blk_stat_add(struct request
*rq
, u64 now
)
52 struct request_queue
*q
= rq
->q
;
53 struct blk_stat_callback
*cb
;
54 struct blk_rq_stat
*stat
;
58 value
= (now
>= rq
->io_start_time_ns
) ? now
- rq
->io_start_time_ns
: 0;
62 list_for_each_entry_rcu(cb
, &q
->stats
->callbacks
, list
) {
63 if (!blk_stat_is_active(cb
))
66 bucket
= cb
->bucket_fn(rq
);
70 stat
= &per_cpu_ptr(cb
->cpu_stat
, cpu
)[bucket
];
71 blk_rq_stat_add(stat
, value
);
77 static void blk_stat_timer_fn(struct timer_list
*t
)
79 struct blk_stat_callback
*cb
= from_timer(cb
, t
, timer
);
83 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
84 blk_rq_stat_init(&cb
->stat
[bucket
]);
86 for_each_online_cpu(cpu
) {
87 struct blk_rq_stat
*cpu_stat
;
89 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
90 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++) {
91 blk_rq_stat_sum(&cb
->stat
[bucket
], &cpu_stat
[bucket
]);
92 blk_rq_stat_init(&cpu_stat
[bucket
]);
99 struct blk_stat_callback
*
100 blk_stat_alloc_callback(void (*timer_fn
)(struct blk_stat_callback
*),
101 int (*bucket_fn
)(const struct request
*),
102 unsigned int buckets
, void *data
)
104 struct blk_stat_callback
*cb
;
106 cb
= kmalloc(sizeof(*cb
), GFP_KERNEL
);
110 cb
->stat
= kmalloc_array(buckets
, sizeof(struct blk_rq_stat
),
116 cb
->cpu_stat
= __alloc_percpu(buckets
* sizeof(struct blk_rq_stat
),
117 __alignof__(struct blk_rq_stat
));
124 cb
->timer_fn
= timer_fn
;
125 cb
->bucket_fn
= bucket_fn
;
127 cb
->buckets
= buckets
;
128 timer_setup(&cb
->timer
, blk_stat_timer_fn
, 0);
133 void blk_stat_add_callback(struct request_queue
*q
,
134 struct blk_stat_callback
*cb
)
140 for_each_possible_cpu(cpu
) {
141 struct blk_rq_stat
*cpu_stat
;
143 cpu_stat
= per_cpu_ptr(cb
->cpu_stat
, cpu
);
144 for (bucket
= 0; bucket
< cb
->buckets
; bucket
++)
145 blk_rq_stat_init(&cpu_stat
[bucket
]);
148 spin_lock_irqsave(&q
->stats
->lock
, flags
);
149 list_add_tail_rcu(&cb
->list
, &q
->stats
->callbacks
);
150 blk_queue_flag_set(QUEUE_FLAG_STATS
, q
);
151 spin_unlock_irqrestore(&q
->stats
->lock
, flags
);
154 void blk_stat_remove_callback(struct request_queue
*q
,
155 struct blk_stat_callback
*cb
)
159 spin_lock_irqsave(&q
->stats
->lock
, flags
);
160 list_del_rcu(&cb
->list
);
161 if (list_empty(&q
->stats
->callbacks
) && !q
->stats
->accounting
)
162 blk_queue_flag_clear(QUEUE_FLAG_STATS
, q
);
163 spin_unlock_irqrestore(&q
->stats
->lock
, flags
);
165 del_timer_sync(&cb
->timer
);
168 static void blk_stat_free_callback_rcu(struct rcu_head
*head
)
170 struct blk_stat_callback
*cb
;
172 cb
= container_of(head
, struct blk_stat_callback
, rcu
);
173 free_percpu(cb
->cpu_stat
);
178 void blk_stat_free_callback(struct blk_stat_callback
*cb
)
181 call_rcu(&cb
->rcu
, blk_stat_free_callback_rcu
);
184 void blk_stat_disable_accounting(struct request_queue
*q
)
188 spin_lock_irqsave(&q
->stats
->lock
, flags
);
189 if (!--q
->stats
->accounting
&& list_empty(&q
->stats
->callbacks
))
190 blk_queue_flag_clear(QUEUE_FLAG_STATS
, q
);
191 spin_unlock_irqrestore(&q
->stats
->lock
, flags
);
193 EXPORT_SYMBOL_GPL(blk_stat_disable_accounting
);
195 void blk_stat_enable_accounting(struct request_queue
*q
)
199 spin_lock_irqsave(&q
->stats
->lock
, flags
);
200 if (!q
->stats
->accounting
++ && list_empty(&q
->stats
->callbacks
))
201 blk_queue_flag_set(QUEUE_FLAG_STATS
, q
);
202 spin_unlock_irqrestore(&q
->stats
->lock
, flags
);
204 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting
);
206 struct blk_queue_stats
*blk_alloc_queue_stats(void)
208 struct blk_queue_stats
*stats
;
210 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
214 INIT_LIST_HEAD(&stats
->callbacks
);
215 spin_lock_init(&stats
->lock
);
216 stats
->accounting
= 0;
221 void blk_free_queue_stats(struct blk_queue_stats
*stats
)
226 WARN_ON(!list_empty(&stats
->callbacks
));