/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"
#include "../dm-bufio.h"

#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>
#define DM_MSG_PREFIX "block manager"

/*----------------------------------------------------------------*/
/*
 * This is a read/write semaphore with a couple of differences.
 *
 * i) There is a restriction on the number of concurrent read locks that
 * may be held at once.  This is just an implementation detail.
 *
 * ii) Recursive locking attempts are detected and return EINVAL.  A stack
 * trace is also emitted for the previous lock acquisition.
 *
 * iii) Priority is given to write locks.
 */
#define MAX_HOLDERS 4
#define MAX_STACK 10

typedef unsigned long stack_entries[MAX_STACK];
struct block_lock {
	spinlock_t lock;
	__s32 count;
	struct list_head waiters;
	struct task_struct *holders[MAX_HOLDERS];

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace traces[MAX_HOLDERS];
	stack_entries entries[MAX_HOLDERS];
#endif
};
struct waiter {
	struct list_head list;
	struct task_struct *task;
	int wants_write;
};
static unsigned __find_holder(struct block_lock *lock,
			      struct task_struct *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			break;

	BUG_ON(i == MAX_HOLDERS);

	return i;
}
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace *t;
#endif

	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	t = lock->traces + h;
	t->nr_entries = 0;
	t->max_entries = MAX_STACK;
	t->entries = lock->entries[h];
	t->skip = 2;
	save_stack_trace(t);
#endif
}
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	put_task_struct(task);
}
static int __check_holder(struct block_lock *lock)
{
	unsigned i;
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	static struct stack_trace t;
	static stack_entries entries;
#endif

	for (i = 0; i < MAX_HOLDERS; i++) {
		if (lock->holders[i] == current) {
			DMERR("recursive lock detected in pool metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			DMERR("previously held here:");
			print_stack_trace(lock->traces + i, 4);

			DMERR("subsequent acquisition attempted here:");
			t.nr_entries = 0;
			t.max_entries = MAX_STACK;
			t.entries = entries;
			t.skip = 3;
			save_stack_trace(&t);
			print_stack_trace(&t, 4);
#endif
			return -EINVAL;
		}
	}

	return 0;
}
static void __wait(struct waiter *w)
{
	for (;;) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);

		if (!w->task)
			break;

		schedule();
	}

	set_task_state(current, TASK_RUNNING);
}
static void __wake_waiter(struct waiter *w)
{
	struct task_struct *task;

	list_del(&w->list);
	task = w->task;
	smp_mb();
	w->task = NULL;
	wake_up_process(task);
}
/*
 * We either wake a few readers or a single writer.
 */
static void __wake_many(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	BUG_ON(lock->count < 0);
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;

		if (w->wants_write) {
			if (lock->count > 0)
				return; /* still read locked */

			lock->count = -1;
			__add_holder(lock, w->task);
			__wake_waiter(w);
			return;
		}

		lock->count++;
		__add_holder(lock, w->task);
		__wake_waiter(w);
	}
}
static void bl_init(struct block_lock *lock)
{
	int i;

	spin_lock_init(&lock->lock);
	lock->count = 0;
	INIT_LIST_HEAD(&lock->waiters);
	for (i = 0; i < MAX_HOLDERS; i++)
		lock->holders[i] = NULL;
}
static int __available_for_read(struct block_lock *lock)
{
	return lock->count >= 0 &&
	       lock->count < MAX_HOLDERS &&
	       list_empty(&lock->waiters);
}
static int bl_down_read(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);

	w.task = current;
	w.wants_write = 0;
	list_add_tail(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}
static int bl_down_read_nonblock(struct block_lock *lock)
{
	int r;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r)
		goto out;

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		r = 0;
	} else
		r = -EWOULDBLOCK;

out:
	spin_unlock(&lock->lock);

	return r;
}
static void bl_up_read(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	BUG_ON(lock->count <= 0);
	__del_holder(lock, current);
	--lock->count;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);

	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers given priority. We know there's only one mutator in the
	 * system, so ignoring the ordering reversal.
	 */
	list_add(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}
static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
static void report_recursive_bug(dm_block_t b, int r)
{
	if (r == -EINVAL)
		DMERR("recursive acquisition of block %llu requested.",
		      (unsigned long long) b);
}
/*----------------------------------------------------------------*/
/*
 * Block manager is currently implemented using dm-bufio.  struct
 * dm_block_manager and struct dm_block map directly onto a couple of
 * structs in the bufio interface.  I want to retain the freedom to move
 * away from bufio in the future.  So these structs are just cast within
 * this .c file, rather than making it through to the public interface.
 */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	return (struct dm_buffer *) b;
}
dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);

void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
	struct dm_block_validator *validator;
	struct block_lock lock;
	int write_locked;
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->validator = NULL;
	bl_init(&aux->lock);
}
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
			 dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}
/*----------------------------------------------------------------
 * Public interface
 *--------------------------------------------------------------*/
struct dm_block_manager {
	struct dm_bufio_client *bufio;
	bool read_only:1;
};
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
						 unsigned block_size,
						 unsigned cache_size,
						 unsigned max_held_per_thread)
{
	int r;
	struct dm_block_manager *bm;

	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm) {
		r = -ENOMEM;
		goto bad;
	}

	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
					   sizeof(struct buffer_aux),
					   dm_block_manager_alloc_callback,
					   dm_block_manager_write_callback);
	if (IS_ERR(bm->bufio)) {
		r = PTR_ERR(bm->bufio);
		kfree(bm);
		goto bad;
	}

	bm->read_only = false;

	return bm;

bad:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;

		if (!v)
			return 0;

		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r)) {
			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return r;
		}
		aux->validator = v;
	} else {
		if (unlikely(aux->validator != v)) {
			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
				    aux->validator->name, v ? v->name : "NULL",
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);
int dm_bm_write_lock(struct dm_block_manager *bm,
		     dm_block_t b, struct dm_block_validator *v,
		     struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	if (bm->read_only)
		return -EPERM;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 1;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_write(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);
int dm_bm_read_try_lock(struct dm_block_manager *bm,
			dm_block_t b, struct dm_block_validator *v,
			struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);
	if (unlikely(!p))
		return -EWOULDBLOCK;

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read_nonblock(&aux->lock);
	if (r < 0) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}
	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
int dm_bm_write_lock_zero(struct dm_block_manager *bm,
			  dm_block_t b, struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	if (bm->read_only)
		return -EPERM;

	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	memset(p, 0, dm_bm_block_size(bm));

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	aux->write_locked = 1;
	aux->validator = v;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
int dm_bm_unlock(struct dm_block *b)
{
	struct buffer_aux *aux;
	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release(to_buffer(b));

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
int dm_bm_flush(struct dm_block_manager *bm)
{
	if (bm->read_only)
		return -EPERM;

	return dm_bufio_write_dirty_buffers(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_flush);
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
	dm_bufio_prefetch(bm->bufio, b, 1);
}
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
	bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);

void dm_bm_set_read_write(struct dm_block_manager *bm)
{
	bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
	return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
/*----------------------------------------------------------------*/

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");

/*----------------------------------------------------------------*/