/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>
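
/*
 * One struct moving_io tracks a single key being copied: the closure that
 * drives the read/write pipeline, the keybuf entry being moved, the data
 * insert op used to rewrite the key, and the bio carrying the payload.
 */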
struct moving_io {
        struct closure          cl;
        struct keybuf_key       *w;
        struct data_insert_op   op;
        struct bbio             bio;
};

static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
        struct cache_set *c = container_of(buf, struct cache_set,
                                           moving_gc_keys);
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
                    GC_MOVE(PTR_BUCKET(c, k, i)))
                        return true;

        return false;
}

/* Moving GC - IO loop */
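
/*
 * Each key is copied by a chain of closures: read_moving_submit() issues the
 * read, write_moving() reinserts the data via bch_data_insert(), and
 * write_moving_finish() frees the pages and drops the in-flight count.
 */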
static void moving_io_destructor(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);

        kfree(io);
}

static void write_moving_finish(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i)
                __free_page(bv->bv_page);

        if (io->op.replace_collision)
                trace_bcache_gc_copy_collision(&io->w->key);

        bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

        up(&io->op.c->moving_in_flight);

        closure_return_with_destructor(cl, moving_io_destructor);
}
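
/*
 * Read completion: if the pointer went stale while the read was in flight,
 * flag the op with -EINTR so write_moving() skips the reinsert.
 */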
static void read_moving_endio(struct bio *bio, int error)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);

        if (error)
                io->op.error = error;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(io->op.c, &b->key, 0)) {
                io->op.error = -EINTR;
        }

        bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
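
/*
 * moving_init() sizes the bio to cover the whole key: KEY_SIZE() is in
 * sectors, so bi_size is KEY_SIZE() << 9 bytes, with one inline bio_vec per
 * PAGE_SECTORS worth of data.
 */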
static void moving_init(struct moving_io *io)
{
        struct bio *bio = &io->bio.bio;

        bio_init(bio);
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                        PAGE_SECTORS);
        bio->bi_private = &io->cl;
        bio->bi_io_vec = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}
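
/*
 * The write is issued as a replace: op->replace_key is the key we just read,
 * so bch_data_insert() only rewrites the pointer if it is still current. A
 * lost race shows up as op->replace_collision and the copy is simply dropped.
 */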
static void write_moving(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;

        if (!op->error) {
                moving_init(io);

                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->bio = &io->bio.bio;

                op->writeback = KEY_DIRTY(&io->w->key);
                op->csum = KEY_CSUM(&io->w->key);

                bkey_copy(&op->replace_key, &io->w->key);
                op->replace = true;

                closure_call(&op->cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, write_moving_finish, op->wq);
}

static void read_moving_submit(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;

        bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

        continue_at(cl, write_moving, io->op.wq);
}
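
/*
 * Scan loop: walk moving_gc_keys, allocate a moving_io per key, read the old
 * copy and reinsert it. The moving_in_flight semaphore (initialized to 64 in
 * bch_moving_init_cache_set()) bounds how many copies run concurrently.
 */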
static void read_moving(struct cache_set *c)
{
        struct keybuf_key *w;
        struct moving_io *io;
        struct bio *bio;
        struct closure cl;

        closure_init_stack(&cl);

        /* XXX: if we error, background writeback could stall indefinitely */

        while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
                w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
                                           &MAX_KEY, moving_pred);
                if (!w)
                        break;

                if (ptr_stale(c, &w->key, 0)) {
                        bch_keybuf_del(&c->moving_gc_keys, w);
                        continue;
                }

                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private = io;
                io->w = w;
                io->op.inode = KEY_INODE(&w->key);
                io->op.c = c;
                io->op.wq = c->moving_gc_wq;

                moving_init(io);
                bio = &io->bio.bio;

                bio->bi_rw = READ;
                bio->bi_end_io = read_moving_endio;

                if (bio_alloc_pages(bio, GFP_KERNEL))
                        goto err;

                trace_bcache_gc_copy(&w->key);

                down(&c->moving_in_flight);
                closure_call(&io->cl, read_moving_submit, NULL, &cl);
        }

        if (0) {
err:            if (!IS_ERR_OR_NULL(w->private))
                        kfree(w->private);

                bch_keybuf_del(&c->moving_gc_keys, w);
        }

        closure_sync(&cl);
}
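
/*
 * bucket_cmp() orders buckets by live sectors; with the heap usage in
 * bch_moving_gc(), ca->heap holds the emptiest candidate buckets, with the
 * fullest of them at the top so it is the first one displaced or popped.
 */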
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
        return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
        struct bucket *b;

        return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
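
/*
 * Pick the buckets with the least live data, but only as many as can be
 * copied into the RESERVE_MOVINGGC buckets, mark them GC_MOVE, and let
 * read_moving() copy their contents so the buckets can be reused whole.
 */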
void bch_moving_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *b;
        unsigned i;

        if (!c->copy_gc_enabled)
                return;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
                        fifo_used(&ca->free[RESERVE_MOVINGGC]);

                ca->heap.used = 0;

                for_each_bucket(b, ca) {
                        if (GC_MARK(b) == GC_MARK_METADATA ||
                            !GC_SECTORS_USED(b) ||
                            GC_SECTORS_USED(b) == ca->sb.bucket_size ||
                            atomic_read(&b->pin))
                                continue;

                        if (!heap_full(&ca->heap)) {
                                sectors_to_move += GC_SECTORS_USED(b);
                                heap_add(&ca->heap, b, bucket_cmp);
                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
                                sectors_to_move -= bucket_heap_top(ca);
                                sectors_to_move += GC_SECTORS_USED(b);

                                ca->heap.data[0] = b;
                                heap_sift(&ca->heap, 0, bucket_cmp);
                        }
                }

                while (sectors_to_move > reserve_sectors) {
                        heap_pop(&ca->heap, b, bucket_cmp);
                        sectors_to_move -= GC_SECTORS_USED(b);
                }

                while (heap_pop(&ca->heap, b, bucket_cmp))
                        SET_GC_MOVE(b, 1);
        }

        mutex_unlock(&c->bucket_lock);

        c->moving_gc_keys.last_scanned = ZERO_KEY;

        read_moving(c);
}
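
/*
 * Set up moving GC state when the cache set is created: an empty keybuf and
 * the in-flight semaphore that caps concurrent copies at 64.
 */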
void bch_moving_init_cache_set(struct cache_set *c)
{
        bch_keybuf_init(&c->moving_gc_keys);
        sema_init(&c->moving_in_flight, 64);
}