/*
 * drivers/md/bcache/movinggc.c
 *
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */
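
/*
 * Moving GC compacts partially-empty buckets: the live data is copied out
 * to new locations so the buckets can be invalidated and reused.
 */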
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>
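
/*
 * State for moving one key's worth of data: the closure sequencing the
 * read/write steps, the keybuf entry being moved, the insert op that
 * rewrites the key at its new location, and the bio for the I/O itself.
 */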
struct moving_io {
        struct closure          cl;
        struct keybuf_key       *w;
        struct data_insert_op   op;
        struct bbio             bio;
};
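
/*
 * Keybuf scan predicate: a key is a candidate for moving if any of its
 * pointers lands in a bucket that garbage collection marked with GC_MOVE.
 */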
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
        struct cache_set *c = container_of(buf, struct cache_set,
                                           moving_gc_keys);
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
                    GC_MOVE(PTR_BUCKET(c, k, i)))
                        return true;

        return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        kfree(io);
}
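
/*
 * Runs once the write (or a failed read) completes: free the bio's pages,
 * drop the keybuf entry, release the in-flight slot, and free the io.
 */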
static void write_moving_finish(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i)
                __free_page(bv->bv_page);

        if (io->op.replace_collision)
                trace_bcache_gc_copy_collision(&io->w->key);

        bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

        up(&io->op.c->moving_in_flight);

        closure_return_with_destructor(cl, moving_io_destructor);
}
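
/*
 * Read completion: record any I/O error; if the key was clean and its
 * pointer went stale while we were reading, -EINTR cancels the write step.
 */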
static void read_moving_endio(struct bio *bio, int error)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);

        if (error)
                io->op.error = error;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(io->op.c, &b->key, 0)) {
                io->op.error = -EINTR;
        }

        bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
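
/*
 * (Re)initialize the embedded bio: idle I/O priority, size and vector
 * count derived from the key, pages mapped in by bch_bio_map().
 */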
static void moving_init(struct moving_io *io)
{
        struct bio *bio = &io->bio.bio;

        bio_init(bio);
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                               PAGE_SECTORS);
        bio->bi_private         = &io->cl;
        bio->bi_io_vec          = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}
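
/*
 * Write step: if the read succeeded, reinsert the data at a new location.
 * The insert uses replace_key so it fails harmlessly if the key was
 * changed or invalidated while the data was in flight.
 */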
static void write_moving(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;

        if (!op->error) {
                moving_init(io);

                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;

                op->writeback           = KEY_DIRTY(&io->w->key);
                op->csum                = KEY_CSUM(&io->w->key);

                bkey_copy(&op->replace_key, &io->w->key);
                op->replace             = true;

                closure_call(&op->cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, write_moving_finish, op->wq);
}
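
/* Submit the read for one key, then continue on to the write step. */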
static void read_moving_submit(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;

        bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

        continue_at(cl, write_moving, io->op.wq);
}
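
/*
 * Main loop: walk the keybuf for keys matching moving_pred and start a
 * read for each, throttled to 64 in flight by the moving_in_flight
 * semaphore (see bch_moving_init_cache_set() below).
 */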
static void read_moving(struct cache_set *c)
{
        struct keybuf_key *w;
        struct moving_io *io;
        struct bio *bio;
        struct closure cl;

        closure_init_stack(&cl);

        /* XXX: if we error, background writeback could stall indefinitely */

        while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
                w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
                                           &MAX_KEY, moving_pred);
                if (!w)
                        break;

                if (ptr_stale(c, &w->key, 0)) {
                        bch_keybuf_del(&c->moving_gc_keys, w);
                        continue;
                }

                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private      = io;
                io->w           = w;
                io->op.inode    = KEY_INODE(&w->key);
                io->op.c        = c;
                io->op.wq       = c->moving_gc_wq;

                moving_init(io);
                bio = &io->bio.bio;

                bio->bi_rw      = READ;
                bio->bi_end_io  = read_moving_endio;

                if (bio_alloc_pages(bio, GFP_KERNEL))
                        goto err;

                trace_bcache_gc_copy(&w->key);

                down(&c->moving_in_flight);
                closure_call(&io->cl, read_moving_submit, NULL, &cl);
        }

        if (0) {
err:            if (!IS_ERR_OR_NULL(w->private))
                        kfree(w->private);

                bch_keybuf_del(&c->moving_gc_keys, w);
        }

        closure_sync(&cl);
}
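
/*
 * Heap helpers: with bcache's heap macros, bucket_cmp yields a max-heap
 * on GC_SECTORS_USED, so the top is the kept candidate with the most
 * live data; bucket_heap_top reads that value (0 if the heap is empty).
 */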
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
        return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
        struct bucket *b;
        return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
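
/*
 * Choose which buckets to move: consider buckets that are neither empty,
 * full, metadata, nor pinned; keep the emptiest set whose live data fits
 * in the RESERVE_MOVINGGC free buckets; mark them GC_MOVE and copy their
 * data via read_moving().
 */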
void bch_moving_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *b;
        unsigned i;

        if (!c->copy_gc_enabled)
                return;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
                        fifo_used(&ca->free[RESERVE_MOVINGGC]);

                ca->heap.used = 0;

                for_each_bucket(b, ca) {
                        if (GC_MARK(b) == GC_MARK_METADATA ||
                            !GC_SECTORS_USED(b) ||
                            GC_SECTORS_USED(b) == ca->sb.bucket_size ||
                            atomic_read(&b->pin))
                                continue;

                        if (!heap_full(&ca->heap)) {
                                sectors_to_move += GC_SECTORS_USED(b);
                                heap_add(&ca->heap, b, bucket_cmp);
                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
                                sectors_to_move -= bucket_heap_top(ca);
                                sectors_to_move += GC_SECTORS_USED(b);

                                ca->heap.data[0] = b;
                                heap_sift(&ca->heap, 0, bucket_cmp);
                        }
                }

                while (sectors_to_move > reserve_sectors) {
                        heap_pop(&ca->heap, b, bucket_cmp);
                        sectors_to_move -= GC_SECTORS_USED(b);
                }

                while (heap_pop(&ca->heap, b, bucket_cmp))
                        SET_GC_MOVE(b, 1);
        }

        mutex_unlock(&c->bucket_lock);

        c->moving_gc_keys.last_scanned = ZERO_KEY;

        read_moving(c);
}
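
/* Called at cache set registration: set up the keybuf and in-flight limit. */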
void bch_moving_init_cache_set(struct cache_set *c)
{
        bch_keybuf_init(&c->moving_gc_keys);
        sema_init(&c->moving_in_flight, 64);
}