#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"
static int dm_stat_need_rcu_barrier;
/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
};
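
/*
 * The in-flight counters below are incremented when a bio starts and
 * decremented when it ends, possibly on a different CPU, so they live in
 * this shared structure as atomics while everything else stays in the
 * cheaper per-CPU dm_stat_percpu counters.  "tmp" is scratch space used to
 * sum the per-CPU counters when a snapshot is reported or cleared.
 */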
struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long stamp;
	struct dm_stat_percpu tmp;
};
struct dm_stat {
	struct list_head list_entry;
	int id;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[0];
};
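
/*
 * stat_shared[0] is a flexible array: dm_stats_create() allocates
 * shared_alloc_size = sizeof(struct dm_stat) +
 *                     n_entries * sizeof(struct dm_stat_shared)
 * as one block, plus a separate percpu_alloc_size block for each possible
 * CPU, pointed to by stat_percpu[].
 */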
struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};
/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;
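
/*
 * All statistics memory is obtained through dm_kvzalloc() and released
 * through dm_kvfree(), which bracket every allocation with
 * claim_shared_memory()/free_shared_memory(), so shared_memory_amount tracks
 * the bytes currently in use.  The value is exported read-only as the
 * stats_current_allocated_bytes module parameter at the end of this file.
 */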
static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}
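
/*
 * __check_shared_memory() expects shared_memory_lock to be held; the
 * check_shared_memory() and claim_shared_memory() wrappers below acquire it.
 */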
static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}
static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}
static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}
static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	if (alloc_size <= KMALLOC_MAX_SIZE) {
		p = kzalloc_node(alloc_size,
				 GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN,
				 node);
		if (p)
			return p;
	}
	p = vzalloc_node(alloc_size, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}
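
/*
 * Allocation strategy used above: try kzalloc_node() first (with
 * __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN so an oversized request
 * fails quietly instead of thrashing the allocator), fall back to
 * vzalloc_node(), and give the accounted bytes back if both attempts fail.
 */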
static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu)
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}
static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}
void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}
void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}
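
/*
 * Region ids: stats->list is kept sorted by id, and the loop in
 * dm_stats_create() walks it to find the lowest id not yet in use;
 * list_add_tail_rcu(&s->list_entry, l) then inserts the new region just
 * before the first entry with a larger id, preserving the ordering that
 * __dm_stats_find() below relies on.
 */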
static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}
static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback, so if any of the per-CPU
	 * blocks (or the dm_stat itself) came from vmalloc, free synchronously
	 * after an expedited grace period instead of using call_rcu.
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]))
			goto do_sync_free;
	if (is_vmalloc_addr(s)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
		call_rcu(&s->rcu_head, dm_stat_free);
	}

	return 0;
}
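
/*
 * dm_stat_need_rcu_barrier records that at least one region was queued for
 * freeing via call_rcu(); dm_statistics_exit() checks it so the module can
 * wait for outstanding RCU callbacks before its code goes away.
 */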
static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}
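
/*
 * Example @stats_list line (illustrative values only):
 *   0: 0+1048576 1024 - -
 * i.e. region 0 covers sectors 0..1048575 in 1024-sector areas, with no
 * program_id or aux_data set ("-").
 */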
static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long now = jiffies;
	unsigned in_flight_read;
	unsigned in_flight_write;
	unsigned long difference = now - shared->stamp;

	if (!difference)
		return;
	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}
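
/*
 * dm_stat_round() folds the jiffies elapsed since shared->stamp into the
 * busy and queue-time counters, weighted by how many reads/writes were in
 * flight over that interval; this mirrors the bookkeeping part_round_stats
 * does for gendisk statistics.
 */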
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      unsigned long bi_rw, sector_t len, bool merged,
			      bool end, unsigned long duration)
{
	unsigned long idx = bi_rw & REQ_WRITE;
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable.  On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		dm_stat_round(shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += merged;
		p->ticks[idx] += duration;
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}
static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;

	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux->merged, end, duration);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}
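
/*
 * A bio may straddle several step-sized areas of a region.  The loop above
 * clips the bio to the region, then walks it area by area: each fragment of
 * at most (s->step - offset) sectors is accounted to its own entry, and only
 * the first fragment can start at a non-zero offset within an area.
 */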
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration, struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == ACCESS_ONCE(last->last_sector) &&
			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
		ACCESS_ONCE(last->last_sector) = end_sector;
		ACCESS_ONCE(last->last_rw) = bi_rw;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(s, &stats->list, list_entry)
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);

	rcu_read_unlock();
}
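
/*
 * The "merged" heuristic above: a bio counts as merged if it begins exactly
 * where the previous bio seen on this CPU ended and has the same
 * WRITE/DISCARD type.  last_sector/last_rw are plain per-CPU hints, so a
 * preemption race can only misclassify an individual bio, which the comment
 * above explicitly accepts.
 */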
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(shared, p);
	local_irq_enable();

	memset(&shared->tmp, 0, sizeof(shared->tmp));
	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
	}
}
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);

		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
	}
}
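
/*
 * Clearing never touches other CPUs' counters: it subtracts the totals just
 * snapshotted into shared->tmp from the local CPU's counters, so the sum
 * across all CPUs becomes zero while remote counters can keep advancing
 * without any cross-CPU synchronization.
 */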
static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}
/*
 * This is like jiffies_to_msecs, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
{
	unsigned long long result = 0;
	unsigned mult;

	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}
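
/*
 * jiffies_to_msecs() only takes an unsigned int, so the 64-bit jiffies value
 * is split into 22-bit chunks: with mult = jiffies_to_msecs(1 << 22),
 *   msecs(j) ~= msecs(j & 0x3fffff)
 *             + mult * msecs((j >> 22) & 0x3fffff)
 *             + mult * mult * msecs(j >> 44)
 * which holds as long as the jiffies-to-milliseconds conversion is linear.
 */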
static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		/*
		 * Columns: reads, read merges, read sectors, read ticks (ms),
		 * writes, write merges, write sectors, write ticks (ms),
		 * in-flight, io ticks (ms), time in queue (ms),
		 * read io ticks (ms), write io ticks (ms).
		 */
		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}
static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}
static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;

	/*
	 * Input format:
	 *   <range> <step> [<program_id> [<aux_data>]]
	 */

	if (argc < 3 || argc > 5)
		return -EINVAL;

	if (!strcmp(argv[1], "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		return -EINVAL;

	end = start + len;
	if (start >= end)
		return -EINVAL;

	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		return -EINVAL;

	program_id = "-";
	aux_data = "-";

	if (argc > 3)
		program_id = argv[3];

	if (argc > 4)
		aux_data = argv[4];

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked).  So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen))
		return 1;

	id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
			     dm_internal_suspend, dm_internal_resume, md);
	if (id < 0)
		return id;

	snprintf(result, maxlen, "%d", id);

	return 1;
}
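
/*
 * Illustrative messages accepted by the parser above:
 *   "@stats_create - /100"          - whole device, split into 100 areas
 *   "@stats_create 0+1024 256 prog" - sectors 0..1023 in 256-sector areas,
 *                                     tagged with program_id "prog"
 * The id of the newly created region is written to the result buffer.
 */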
static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}
static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}
static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}
static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}
static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	if (dm_request_based(md)) {
		DMWARN("Statistics are only supported for bio-based devices");
		return -EOPNOTSUPP;
	}

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}
int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}
void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}
module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");