drivers/mtd/ubi/fastmap-wl.c
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
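
	/* Clear fm_work_scheduled under wl_lock so that get_peb_for_wl()
	 * can schedule the work again once a pool runs empty. */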
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find the wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree where to look for
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}
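
/**
 * anchor_pebs_avalible - check whether a free PEB in the anchor area exists.
 * @root: RB-tree of free wear-leveling entries to search
 *
 * Returns %1 if @root contains an entry with a PEB number below
 * %UBI_FM_MAX_START, i.e. one usable as a fastmap anchor, %0 otherwise.
 */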
static int anchor_pebs_avalible(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;
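
	/* Never hand out the last few free PEBs, they stay reserved for
	 * bad eraseblock handling (beb_rsvd_pebs). */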
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem no longer knows this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;
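
	/* Alternately refill the user pool and the WL pool from the free
	 * tree until both are full or the free tree runs low; the WL pool
	 * keeps a few PEBs beyond beb_rsvd_pebs in reserve. */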
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
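
	/* If a pool is exhausted, a new fastmap is written to refill both
	 * pools; if the user pool is still empty afterwards, one synchronous
	 * attempt is made to produce a free PEB before returning -ENOSPC. */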
again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);
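
	/* lnum 0 denotes the fastmap superblock, all other PEBs carry
	 * fastmap data. */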
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
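
/**
 * ubi_fastmap_close - flush pending fastmap work and free fastmap resources.
 * @ubi: UBI device description object
 *
 * Returns the unused pool PEBs to the free tree and frees the in-memory
 * fastmap structure.
 */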
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
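	/* If fastmap is enabled but no fastmap has been written yet, do not
	 * hand out anchor-area PEBs (pnum < UBI_FM_MAX_START); pick another
	 * entry from @root instead. */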
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}