/* drivers/md/bcache/util.h (from Linux 4.2.1) */
#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "closure.h"

#define PAGE_SECTORS		(PAGE_SIZE / 512)

struct closure;
#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond)		BUG_ON(cond)
#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

/* cond is still evaluated, so side effects and type checking are preserved */
#define EBUG_ON(cond)		do { if (cond); } while (0)
#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif
#define DECLARE_HEAP(type, name) \
        struct { \
                size_t size, used; \
                type *data; \
        } name
/* Tries kmalloc() first, falling back to vmalloc() if the gfp flags allow */
#define init_heap(heap, _size, gfp) \
({ \
        size_t _bytes; \
        (heap)->used = 0; \
        (heap)->size = (_size); \
        _bytes = (heap)->size * sizeof(*(heap)->data); \
        (heap)->data = NULL; \
        if (_bytes < KMALLOC_MAX_SIZE) \
                (heap)->data = kmalloc(_bytes, (gfp)); \
        if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
                (heap)->data = vmalloc(_bytes); \
        (heap)->data; \
})
#define free_heap(heap) \
do { \
        kvfree((heap)->data); \
        (heap)->data = NULL; \
} while (0)

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
/* Sifts the element at index i down towards the leaves */
#define heap_sift(h, i, cmp) \
do { \
        size_t _r, _j = i; \
\
        for (; _j * 2 + 1 < (h)->used; _j = _r) { \
                _r = _j * 2 + 1; \
                if (_r + 1 < (h)->used && \
                    cmp((h)->data[_r], (h)->data[_r + 1])) \
                        _r++; \
\
                if (cmp((h)->data[_r], (h)->data[_j])) \
                        break; \
                heap_swap(h, _r, _j); \
        } \
} while (0)
/* Despite the name, this sifts the element at index i up towards the root */
#define heap_sift_down(h, i, cmp) \
do { \
        while (i) { \
                size_t p = (i - 1) / 2; \
                if (cmp((h)->data[i], (h)->data[p])) \
                        break; \
                heap_swap(h, i, p); \
                i = p; \
        } \
} while (0)
#define heap_add(h, d, cmp) \
({ \
        bool _r = !heap_full(h); \
        if (_r) { \
                size_t _i = (h)->used++; \
                (h)->data[_i] = d; \
\
                heap_sift_down(h, _i, cmp); \
                heap_sift(h, _i, cmp); \
        } \
        _r; \
})
#define heap_pop(h, d, cmp) \
({ \
        bool _r = (h)->used; \
        if (_r) { \
                (d) = (h)->data[0]; \
                (h)->used--; \
                heap_swap(h, 0, (h)->used); \
                heap_sift(h, 0, cmp); \
        } \
        _r; \
})
#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)
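
/*
 * A minimal usage sketch (hypothetical names). With a comparison like the
 * one below, where cmp(l, r) is true for l < r, these macros maintain a
 * max-heap: heap_peek()/heap_pop() yield the largest element.
 *
 *	#define max_cmp(l, r)	((l) < (r))
 *
 *	DECLARE_HEAP(int, heap);
 *	int top;
 *
 *	if (!init_heap(&heap, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	heap_add(&heap, 3, max_cmp);
 *	heap_add(&heap, 7, max_cmp);
 *
 *	if (heap_pop(&heap, top, max_cmp))	// top == 7
 *		use(top);
 *
 *	free_heap(&heap);
 */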
#define DECLARE_FIFO(type, name) \
        struct { \
                size_t front, back, size, mask; \
                type *data; \
        } name

#define fifo_for_each(c, fifo, iter) \
        for (iter = (fifo)->front; \
             c = (fifo)->data[iter], iter != (fifo)->back; \
             iter = (iter + 1) & (fifo)->mask)
/*
 * Rounds the allocation up to a power of two so that wrapping an index
 * reduces to masking; tries kmalloc() first, then vmalloc() if the gfp
 * flags allow.
 */
#define __init_fifo(fifo, gfp) \
({ \
        size_t _allocated_size, _bytes; \
        BUG_ON(!(fifo)->size); \
\
        _allocated_size = roundup_pow_of_two((fifo)->size + 1); \
        _bytes = _allocated_size * sizeof(*(fifo)->data); \
\
        (fifo)->mask = _allocated_size - 1; \
        (fifo)->front = (fifo)->back = 0; \
        (fifo)->data = NULL; \
\
        if (_bytes < KMALLOC_MAX_SIZE) \
                (fifo)->data = kmalloc(_bytes, (gfp)); \
        if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
                (fifo)->data = vmalloc(_bytes); \
        (fifo)->data; \
})
#define init_fifo_exact(fifo, _size, gfp) \
({ \
        (fifo)->size = (_size); \
        __init_fifo(fifo, gfp); \
})

#define init_fifo(fifo, _size, gfp) \
({ \
        (fifo)->size = (_size); \
        if ((fifo)->size > 4) \
                (fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \
        __init_fifo(fifo, gfp); \
})
#define free_fifo(fifo) \
do { \
        kvfree((fifo)->data); \
        (fifo)->data = NULL; \
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo) \
        ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)
#define fifo_push_back(fifo, i) \
({ \
        bool _r = !fifo_full((fifo)); \
        if (_r) { \
                (fifo)->data[(fifo)->back++] = (i); \
                (fifo)->back &= (fifo)->mask; \
        } \
        _r; \
})
#define fifo_pop_front(fifo, i) \
({ \
        bool _r = !fifo_empty((fifo)); \
        if (_r) { \
                (i) = (fifo)->data[(fifo)->front++]; \
                (fifo)->front &= (fifo)->mask; \
        } \
        _r; \
})
#define fifo_push_front(fifo, i) \
({ \
        bool _r = !fifo_full((fifo)); \
        if (_r) { \
                --(fifo)->front; \
                (fifo)->front &= (fifo)->mask; \
                (fifo)->data[(fifo)->front] = (i); \
        } \
        _r; \
})
#define fifo_pop_back(fifo, i) \
({ \
        bool _r = !fifo_empty((fifo)); \
        if (_r) { \
                --(fifo)->back; \
                (fifo)->back &= (fifo)->mask; \
                (i) = (fifo)->data[(fifo)->back]; \
        } \
        _r; \
})
#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))

#define fifo_swap(l, r) \
do { \
        swap((l)->front, (r)->front); \
        swap((l)->back, (r)->back); \
        swap((l)->size, (r)->size); \
        swap((l)->mask, (r)->mask); \
        swap((l)->data, (r)->data); \
} while (0)
#define fifo_move(dest, src) \
do { \
        typeof(*((dest)->data)) _t; \
        while (!fifo_full(dest) && \
               fifo_pop(src, _t)) \
                fifo_push(dest, _t); \
} while (0)
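
/*
 * A minimal usage sketch (hypothetical names):
 *
 *	DECLARE_FIFO(int, fifo);
 *	size_t iter;
 *	int i;
 *
 *	if (!init_fifo(&fifo, 8, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	fifo_push(&fifo, 42);
 *
 *	fifo_for_each(i, &fifo, iter)
 *		pr_debug("queued: %i\n", i);
 *
 *	while (fifo_pop(&fifo, i))
 *		consume(i);
 *
 *	free_fifo(&fifo);
 */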
/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that, also has no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
        struct { \
                type *freelist; \
                type data[size]; \
        } name
#define array_alloc(array) \
({ \
        typeof((array)->freelist) _ret = (array)->freelist; \
\
        if (_ret) \
                (array)->freelist = *((typeof((array)->freelist) *) _ret); \
\
        _ret; \
})
#define array_free(array, ptr) \
do { \
        typeof((array)->freelist) _ptr = ptr; \
\
        *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
        (array)->freelist = _ptr; \
} while (0)
#define array_allocator_init(array) \
do { \
        typeof((array)->freelist) _i; \
\
        BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
        (array)->freelist = NULL; \
\
        for (_i = (array)->data; \
             _i < (array)->data + ARRAY_SIZE((array)->data); \
             _i++) \
                array_free(array, _i); \
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
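
/*
 * A minimal usage sketch (hypothetical names). Note the BUILD_BUG_ON() in
 * array_allocator_init(): elements must be at least pointer sized, since a
 * free element doubles as the freelist link.
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct foo, foo_pool, 16);
 *
 *	array_allocator_init(&foo_pool);
 *
 *	struct foo *f = array_alloc(&foo_pool);
 *	if (!f)				// pool exhausted; never blocks
 *		return -ENOMEM;
 *	...
 *	array_free(&foo_pool, f);
 */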
#define ANYSINT_MAX(t) \
        ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *, int *);
int bch_strtouint_h(const char *, unsigned int *);
int bch_strtoll_h(const char *, long long *);
int bch_strtoull_h(const char *, unsigned long long *);
static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
        return bch_strtoint_h(cp, (int *) res);
#else
        return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
        return bch_strtouint_h(cp, (unsigned int *) res);
#else
        return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}
#define strtoi_h(cp, res) \
        (__builtin_types_compatible_p(typeof(*res), int) \
        ? bch_strtoint_h(cp, (void *) res) \
        : __builtin_types_compatible_p(typeof(*res), long) \
        ? bch_strtol_h(cp, (void *) res) \
        : __builtin_types_compatible_p(typeof(*res), long long) \
        ? bch_strtoll_h(cp, (void *) res) \
        : __builtin_types_compatible_p(typeof(*res), unsigned int) \
        ? bch_strtouint_h(cp, (void *) res) \
        : __builtin_types_compatible_p(typeof(*res), unsigned long) \
        ? bch_strtoul_h(cp, (void *) res) \
        : __builtin_types_compatible_p(typeof(*res), unsigned long long) \
        ? bch_strtoull_h(cp, (void *) res) : -EINVAL)
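
/*
 * Illustrative only: strtoi_h() dispatches on the pointee type at compile
 * time, so one call site handles any integer width (the "_h" parsers are
 * bcache's human-readable variants). Hypothetical names:
 *
 *	unsigned long size;
 *
 *	if (strtoi_h(buf, &size))
 *		return -EINVAL;
 *
 * An unsupported pointee type makes the whole expression -EINVAL.
 */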
#define strtoul_safe(cp, var) \
({ \
        unsigned long _v; \
        int _r = kstrtoul(cp, 10, &_v); \
        if (!_r) \
                var = _v; \
        _r; \
})

#define strtoul_safe_clamp(cp, var, min, max) \
({ \
        unsigned long _v; \
        int _r = kstrtoul(cp, 10, &_v); \
        if (!_r) \
                var = clamp_t(typeof(var), _v, min, max); \
        _r; \
})
#define snprint(buf, size, var) \
        snprintf(buf, size, \
                __builtin_types_compatible_p(typeof(var), int) \
                     ? "%i\n" : \
                __builtin_types_compatible_p(typeof(var), unsigned) \
                     ? "%u\n" : \
                __builtin_types_compatible_p(typeof(var), long) \
                     ? "%li\n" : \
                __builtin_types_compatible_p(typeof(var), unsigned long) \
                     ? "%lu\n" : \
                __builtin_types_compatible_p(typeof(var), int64_t) \
                     ? "%lli\n" : \
                __builtin_types_compatible_p(typeof(var), uint64_t) \
                     ? "%llu\n" : \
                __builtin_types_compatible_p(typeof(var), const char *) \
                     ? "%s\n" : "%i\n", var)
ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
                                size_t selected);

ssize_t bch_read_string_list(const char *buf, const char * const list[]);
struct time_stats {
        spinlock_t	lock;
        /*
         * all fields are in nanoseconds, averages are ewmas stored left shifted
         * by 8
         */
        uint64_t	max_duration;
        uint64_t	average_duration;
        uint64_t	average_frequency;
        uint64_t	last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);
static inline unsigned local_clock_us(void)
{
        return local_clock() >> 10;
}

/* Lowercase units, so NSEC_PER_ ## units pastes correctly in the macros below */
#define NSEC_PER_ns			1L
#define NSEC_PER_us			NSEC_PER_USEC
#define NSEC_PER_ms			NSEC_PER_MSEC
#define NSEC_PER_sec			NSEC_PER_SEC
#define __print_time_stat(stats, name, stat, units) \
        sysfs_print(name ## _ ## stat ## _ ## units, \
                    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name, \
                               frequency_units, \
                               duration_units) \
do { \
        __print_time_stat(stats, name, \
                          average_frequency, frequency_units); \
        __print_time_stat(stats, name, \
                          average_duration, duration_units); \
        sysfs_print(name ## _ ## max_duration ## _ ## duration_units, \
                    div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
\
        sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
                    ? div_s64(local_clock() - (stats)->last, \
                              NSEC_PER_ ## frequency_units) \
                    : -1LL); \
} while (0)
#define sysfs_time_stats_attribute(name, \
                                   frequency_units, \
                                   duration_units) \
read_attribute(name ## _average_frequency_ ## frequency_units); \
read_attribute(name ## _average_duration_ ## duration_units); \
read_attribute(name ## _max_duration_ ## duration_units); \
read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name, \
                                        frequency_units, \
                                        duration_units) \
&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
&sysfs_ ## name ## _average_duration_ ## duration_units, \
&sysfs_ ## name ## _max_duration_ ## duration_units, \
&sysfs_ ## name ## _last_ ## frequency_units,
#define ewma_add(ewma, val, weight, factor) \
({ \
        (ewma) *= (weight) - 1; \
        (ewma) += (val) << factor; \
        (ewma) /= (weight); \
        (ewma) >> factor; \
})
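
/*
 * ewma_add() keeps a fixed-point exponentially weighted moving average:
 * the stored value is the average scaled by 2^factor, and each new sample
 * contributes 1/weight of its value. Worked example, with weight 8 and
 * factor 0, a stored average of 80, and a sample of 160:
 *
 *	ewma = (80 * 7 + 160) / 8 = 90
 *
 * The macro evaluates to ewma >> factor, the unscaled average.
 */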
struct bch_ratelimit {
        /* Next time we want to do some work, in nanoseconds */
        uint64_t		next;

        /*
         * Rate at which we want to do work, in units per second
         * The units here correspond to the units passed to bch_next_delay()
         */
        unsigned		rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
        d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
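
/*
 * Illustrative only (hypothetical names): the usual pattern is to report
 * how much work was just done and sleep for whatever delay
 * bch_next_delay() hands back:
 *
 *	uint64_t delay = bch_next_delay(&rate, sectors_written);
 *
 *	if (delay)
 *		schedule_timeout_interruptible(delay);
 */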
#define __DIV_SAFE(n, d, zero) \
({ \
        typeof(n) _n = (n); \
        typeof(d) _d = (d); \
        _d ? _n / _d : zero; \
})

#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member) \
({ \
        typeof(ptr) _ptr = ptr; \
        _ptr ? container_of(_ptr, type, member) : NULL; \
})
#define RB_INSERT(root, new, member, cmp) \
({ \
        __label__ dup; \
        struct rb_node **n = &(root)->rb_node, *parent = NULL; \
        typeof(new) this; \
        int res, ret = -1; \
\
        while (*n) { \
                parent = *n; \
                this = container_of(*n, typeof(*(new)), member); \
                res = cmp(new, this); \
                if (!res) \
                        goto dup; \
                n = res < 0 \
                        ? &(*n)->rb_left \
                        : &(*n)->rb_right; \
        } \
\
        rb_link_node(&(new)->member, parent, n); \
        rb_insert_color(&(new)->member, root); \
        ret = 0; \
dup: \
        ret; \
})
#define RB_SEARCH(root, search, member, cmp) \
({ \
        struct rb_node *n = (root)->rb_node; \
        typeof(&(search)) this, ret = NULL; \
        int res; \
\
        while (n) { \
                this = container_of(n, typeof(search), member); \
                res = cmp(&(search), this); \
                if (!res) { \
                        ret = this; \
                        break; \
                } \
                n = res < 0 \
                        ? n->rb_left \
                        : n->rb_right; \
        } \
        ret; \
})
#define RB_GREATER(root, search, member, cmp) \
({ \
        struct rb_node *n = (root)->rb_node; \
        typeof(&(search)) this, ret = NULL; \
        int res; \
\
        while (n) { \
                this = container_of(n, typeof(search), member); \
                res = cmp(&(search), this); \
                if (res < 0) { \
                        ret = this; \
                        n = n->rb_left; \
                } else \
                        n = n->rb_right; \
        } \
        ret; \
})
#define RB_FIRST(root, type, member) \
        container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member) \
        container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member) \
        container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member) \
        container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
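
/*
 * A minimal sketch of RB_INSERT()/RB_SEARCH() over a struct with an
 * embedded rb_node (all names hypothetical). The same cmp() works for
 * both, since each passes element pointers:
 *
 *	struct item {
 *		struct rb_node	node;
 *		uint64_t	key;
 *	};
 *
 *	static int item_cmp(struct item *l, struct item *r)
 *	{
 *		return l->key < r->key ? -1 : l->key > r->key;
 *	}
 *
 *	...
 *	if (RB_INSERT(&root, new, node, item_cmp))
 *		return -EEXIST;		// nonzero means duplicate key
 *
 *	struct item key = { .key = 42 };
 *	struct item *found = RB_SEARCH(&root, key, node, item_cmp);
 */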
/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
        unsigned fract = x & ~(~0 << fract_bits);

        x >>= fract_bits;
        x = 1 << x;
        x += (x * fract) >> fract_bits;

        return x;
}
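
/*
 * Worked example: with fract_bits = 3, x = 20 (0b10100) splits into an
 * integer part of 2 and a fraction of 4/8, so the result is
 * (1 << 2) + (4 * 4) / 8 = 4 + 2 = 6, i.e. 2^2.5 approximated by linear
 * interpolation between 2^2 = 4 and 2^3 = 8.
 */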
void bch_bio_map(struct bio *bio, void *base);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
        return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl, dev) \
do { \
        closure_get(cl); \
        bch_generic_make_request(bio, &(dev)->bio_split_hook); \
} while (0)

uint64_t bch_crc64_update(uint64_t, const void *, size_t);
uint64_t bch_crc64(const void *, size_t);

#endif /* _BCACHE_UTIL_H */