drivers/mtd/ubi/fastmap-wl.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

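	/* Descriptive note: pick the lowest-erase-count entry among the PEBs
	 * that lie in the range usable for a fastmap (pnum < UBI_FM_MAX_START). */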
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

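	/* Descriptive note: never hand out the PEBs held back for bad
	 * eraseblock handling (ubi->beb_rsvd_pebs). */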
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem no longer knows about this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

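	/* Descriptive note: fill both pools in lock step - @pool serves
	 * ubi_wl_get_peb() callers, @wl_pool feeds the WL sub-system via
	 * get_peb_for_wl(). Stop once both pools are full ("enough" reaches 2)
	 * or no more free PEBs can be taken. */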
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
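			/* Descriptive note: per the calling convention above,
			 * fm_eba_sem must be held in read mode on return, so
			 * re-take it even on this error path. */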
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* No luck, trigger wear leveling to produce a new anchor PEB */
	ubi->fm_do_produce_anchor = 1;
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

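	/* Descriptive note: lnum 0 means the PEB held the fastmap super block,
	 * any other lnum means it held fastmap data. */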
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

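/*
 * ubi_fastmap_close - return unused pool PEBs to the free tree and free the
 * in-memory fastmap data structures (descriptive comment, added for clarity).
 */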
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root) {
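	/* Descriptive note: if fastmap is enabled but not yet attached, keep
	 * PEBs below UBI_FM_MAX_START free for the fastmap itself and hand
	 * out the next entry from the free tree instead. */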
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}