#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
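
/*
 * Cutoffs compared against gc_stats.in_use in should_writeback() below:
 * above CUTOFF_WRITEBACK, only REQ_SYNC writes (and writes touching
 * already dirty stripes) go through writeback; above
 * CUTOFF_WRITEBACK_SYNC, writeback is bypassed entirely.
 */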
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
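
/* Sum the per-stripe dirty sector counts for a whole backing device. */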
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
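
/*
 * True if any stripe overlapped by [offset, offset + nr_sectors) has
 * dirty sectors.
 */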
static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	uint64_t stripe = offset >> d->stripe_size_bits;

	while (1) {
		if (atomic_read(d->stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= 1 << d->stripe_size_bits)
			return false;

		nr_sectors -= 1 << d->stripe_size_bits;
		stripe++;
	}
}
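
/*
 * Decide whether a write bio should be cached in writeback mode: refuse if
 * not in writeback mode, if the device is detaching, or if the cache is
 * past CUTOFF_WRITEBACK_SYNC; always write back bios touching already
 * dirty stripes when partial stripe writes are expensive; otherwise cache
 * REQ_SYNC writes and anything arriving below CUTOFF_WRITEBACK.
 */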
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    atomic_read(&dc->disk.detaching) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_writeback_queue(struct cached_dev *);
void bch_writeback_add(struct cached_dev *);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);

#endif