// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>
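
/*
 * State for one key being copied: the closure driving the read/rewrite
 * pipeline, the keybuf entry being moved, the cache insert operation and the
 * bbio (with inline bio_vecs allocated after the struct) used for the read.
 */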
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};
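
/*
 * A key is worth moving if any of its pointers lives in a bucket that the
 * last garbage collection pass flagged with GC_MOVE.
 */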
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */
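
/* Closure destructor: frees the moving_io once the whole pipeline is done. */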
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}
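
/*
 * Final stage: runs after bch_data_insert() has finished rewriting the data.
 * Frees the bio pages, drops the key from the keybuf and releases the
 * in-flight slot taken in read_moving().
 */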
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}
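
/*
 * Completion for the read of the data being moved. If a clean key's pointer
 * went stale while the read was in flight, the data read can no longer be
 * trusted, so the copy is abandoned by flagging the insert op with an error.
 */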
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_status)
		io->op.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}
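
/* Initialize the embedded bio to span exactly the extent described by the key. */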
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
	bio->bi_private		= &io->cl;
	bch_bio_map(bio, NULL);
}
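
/*
 * Second stage: if the read succeeded, rewrite the data through
 * bch_data_insert() using a replace key, so that a concurrent update to the
 * original key shows up as a collision instead of being overwritten.
 */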
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->status) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio	= 1;
		op->bio		= &io->bio.bio;

		op->writeback	= KEY_DIRTY(&io->w->key);
		op->csum	= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace	= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}
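
/* First stage: submit the read of the extent, then continue to write_moving(). */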
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}
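
/*
 * Main moving-GC I/O loop: scan moving_gc_keys for keys that moving_pred()
 * accepts and start a read/rewrite pipeline for each, throttled by the
 * moving_in_flight semaphore.
 */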
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;
		io->op.wq	= c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io	= read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}
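
/* Heap comparison: order buckets by how many sectors of live data they hold. */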
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}
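
/* Sectors used by the bucket at the top of the heap, or 0 if the heap is empty. */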
static unsigned bucket_heap_top(struct cache *ca)
{
	struct bucket *b;
	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
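
/*
 * Pick the buckets to evacuate: for each cache, collect the partially used
 * buckets in a heap, trim the set until its live data fits in the movinggc
 * reserve, mark the survivors GC_MOVE and rewrite their data with
 * read_moving().
 */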
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			     fifo_used(&ca->free[RESERVE_MOVINGGC]);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (GC_MARK(b) == GC_MARK_METADATA ||
			    !GC_SECTORS_USED(b) ||
			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
			    atomic_read(&b->pin))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		while (heap_pop(&ca->heap, b, bucket_cmp))
			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}
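
/* One-time setup of moving-GC state when a cache set is allocated. */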
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}