Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux/fpc-iii.git] / fs / jffs2 / build.c
bloba3750f902adcbae8f8cabbad066f9d629b805155
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/mtd/mtd.h>
20 #include "nodelist.h"
22 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
23 struct jffs2_inode_cache *, struct jffs2_full_dirent **);
25 static inline struct jffs2_inode_cache *
26 first_inode_chain(int *i, struct jffs2_sb_info *c)
28 for (; *i < c->inocache_hashsize; (*i)++) {
29 if (c->inocache_list[*i])
30 return c->inocache_list[*i];
32 return NULL;
35 static inline struct jffs2_inode_cache *
36 next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
38 /* More in this chain? */
39 if (ic->next)
40 return ic->next;
41 (*i)++;
42 return first_inode_chain(i, c);
/*
 * Iterate over every inocache in @c's hash table.  @i is the bucket cursor
 * (an int lvalue), @ic the iteration variable.  Do not remove entries while
 * iterating.
 */
#define for_each_inode(i, c, ic)			\
	for (i = 0, ic = first_inode_chain(&i, (c));	\
	     ic;					\
	     ic = next_inode(&i, ic, (c)))
51 static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
52 struct jffs2_inode_cache *ic)
54 struct jffs2_full_dirent *fd;
56 dbg_fsbuild("building directory inode #%u\n", ic->ino);
58 /* For each child, increase nlink */
59 for(fd = ic->scan_dents; fd; fd = fd->next) {
60 struct jffs2_inode_cache *child_ic;
61 if (!fd->ino)
62 continue;
64 /* we can get high latency here with huge directories */
66 child_ic = jffs2_get_ino_cache(c, fd->ino);
67 if (!child_ic) {
68 dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
69 fd->name, fd->ino, ic->ino);
70 jffs2_mark_node_obsolete(c, fd->raw);
71 continue;
74 if (fd->type == DT_DIR) {
75 if (child_ic->pino_nlink) {
76 JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
77 fd->name, fd->ino, ic->ino);
78 /* TODO: What do we do about it? */
79 } else {
80 child_ic->pino_nlink = ic->ino;
82 } else
83 child_ic->pino_nlink++;
85 dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
86 /* Can't free scan_dents so far. We might need them in pass 2 */
/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
95 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
97 int ret;
98 int i;
99 struct jffs2_inode_cache *ic;
100 struct jffs2_full_dirent *fd;
101 struct jffs2_full_dirent *dead_fds = NULL;
103 dbg_fsbuild("build FS data structures\n");
105 /* First, scan the medium and build all the inode caches with
106 lists of physical nodes */
108 c->flags |= JFFS2_SB_FLAG_SCANNING;
109 ret = jffs2_scan_medium(c);
110 c->flags &= ~JFFS2_SB_FLAG_SCANNING;
111 if (ret)
112 goto exit;
114 dbg_fsbuild("scanned flash completely\n");
115 jffs2_dbg_dump_block_lists_nolock(c);
117 dbg_fsbuild("pass 1 starting\n");
118 c->flags |= JFFS2_SB_FLAG_BUILDING;
119 /* Now scan the directory tree, increasing nlink according to every dirent found. */
120 for_each_inode(i, c, ic) {
121 if (ic->scan_dents) {
122 jffs2_build_inode_pass1(c, ic);
123 cond_resched();
127 dbg_fsbuild("pass 1 complete\n");
129 /* Next, scan for inodes with nlink == 0 and remove them. If
130 they were directories, then decrement the nlink of their
131 children too, and repeat the scan. As that's going to be
132 a fairly uncommon occurrence, it's not so evil to do it this
133 way. Recursion bad. */
134 dbg_fsbuild("pass 2 starting\n");
136 for_each_inode(i, c, ic) {
137 if (ic->pino_nlink)
138 continue;
140 jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
141 cond_resched();
144 dbg_fsbuild("pass 2a starting\n");
146 while (dead_fds) {
147 fd = dead_fds;
148 dead_fds = fd->next;
150 ic = jffs2_get_ino_cache(c, fd->ino);
152 if (ic)
153 jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
154 jffs2_free_full_dirent(fd);
157 dbg_fsbuild("pass 2a complete\n");
158 dbg_fsbuild("freeing temporary data structures\n");
160 /* Finally, we can scan again and free the dirent structs */
161 for_each_inode(i, c, ic) {
162 while(ic->scan_dents) {
163 fd = ic->scan_dents;
164 ic->scan_dents = fd->next;
165 jffs2_free_full_dirent(fd);
167 ic->scan_dents = NULL;
168 cond_resched();
170 jffs2_build_xattr_subsystem(c);
171 c->flags &= ~JFFS2_SB_FLAG_BUILDING;
173 dbg_fsbuild("FS build complete\n");
175 /* Rotate the lists by some number to ensure wear levelling */
176 jffs2_rotate_lists(c);
178 ret = 0;
180 exit:
181 if (ret) {
182 for_each_inode(i, c, ic) {
183 while(ic->scan_dents) {
184 fd = ic->scan_dents;
185 ic->scan_dents = fd->next;
186 jffs2_free_full_dirent(fd);
189 jffs2_clear_xattr_subsystem(c);
192 return ret;
195 static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
196 struct jffs2_inode_cache *ic,
197 struct jffs2_full_dirent **dead_fds)
199 struct jffs2_raw_node_ref *raw;
200 struct jffs2_full_dirent *fd;
202 dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
204 raw = ic->nodes;
205 while (raw != (void *)ic) {
206 struct jffs2_raw_node_ref *next = raw->next_in_ino;
207 dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
208 jffs2_mark_node_obsolete(c, raw);
209 raw = next;
212 if (ic->scan_dents) {
213 int whinged = 0;
214 dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
216 while(ic->scan_dents) {
217 struct jffs2_inode_cache *child_ic;
219 fd = ic->scan_dents;
220 ic->scan_dents = fd->next;
222 if (!fd->ino) {
223 /* It's a deletion dirent. Ignore it */
224 dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
225 jffs2_free_full_dirent(fd);
226 continue;
228 if (!whinged)
229 whinged = 1;
231 dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
233 child_ic = jffs2_get_ino_cache(c, fd->ino);
234 if (!child_ic) {
235 dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
236 fd->name, fd->ino);
237 jffs2_free_full_dirent(fd);
238 continue;
241 /* Reduce nlink of the child. If it's now zero, stick it on the
242 dead_fds list to be cleaned up later. Else just free the fd */
244 if (fd->type == DT_DIR)
245 child_ic->pino_nlink = 0;
246 else
247 child_ic->pino_nlink--;
249 if (!child_ic->pino_nlink) {
250 dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
251 fd->ino, fd->name);
252 fd->next = *dead_fds;
253 *dead_fds = fd;
254 } else {
255 dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
256 fd->ino, fd->name, child_ic->pino_nlink);
257 jffs2_free_full_dirent(fd);
263 We don't delete the inocache from the hash list and free it yet.
264 The erase code will do that, when all the nodes are completely gone.
268 static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
270 uint32_t size;
272 /* Deletion should almost _always_ be allowed. We're fairly
273 buggered once we stop allowing people to delete stuff
274 because there's not enough free space... */
275 c->resv_blocks_deletion = 2;
277 /* Be conservative about how much space we need before we allow writes.
278 On top of that which is required for deletia, require an extra 2%
279 of the medium to be available, for overhead caused by nodes being
280 split across blocks, etc. */
282 size = c->flash_size / 50; /* 2% of flash size */
283 size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
284 size += c->sector_size - 1; /* ... and round up */
286 c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
288 /* When do we let the GC thread run in the background */
290 c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
292 /* When do we allow garbage collection to merge nodes to make
293 long-term progress at the expense of short-term space exhaustion? */
294 c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
296 /* When do we allow garbage collection to eat from bad blocks rather
297 than actually making progress? */
298 c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
300 /* What number of 'very dirty' eraseblocks do we allow before we
301 trigger the GC thread even if we don't _need_ the space. When we
302 can't mark nodes obsolete on the medium, the old dirty nodes cause
303 performance problems because we have to inspect and discard them. */
304 c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
305 if (jffs2_can_mark_obsolete(c))
306 c->vdirty_blocks_gctrigger *= 10;
308 /* If there's less than this amount of dirty space, don't bother
309 trying to GC to make more space. It'll be a fruitless task */
310 c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
312 dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
313 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
314 dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
315 c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
316 dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
317 c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
318 dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
319 c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
320 dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
321 c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
322 dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
323 c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
324 dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
325 c->nospc_dirty_size);
326 dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
327 c->vdirty_blocks_gctrigger);
330 int jffs2_do_mount_fs(struct jffs2_sb_info *c)
332 int ret;
333 int i;
334 int size;
336 c->free_size = c->flash_size;
337 c->nr_blocks = c->flash_size / c->sector_size;
338 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
339 #ifndef __ECOS
340 if (jffs2_blocks_use_vmalloc(c))
341 c->blocks = vzalloc(size);
342 else
343 #endif
344 c->blocks = kzalloc(size, GFP_KERNEL);
345 if (!c->blocks)
346 return -ENOMEM;
348 for (i=0; i<c->nr_blocks; i++) {
349 INIT_LIST_HEAD(&c->blocks[i].list);
350 c->blocks[i].offset = i * c->sector_size;
351 c->blocks[i].free_size = c->sector_size;
354 INIT_LIST_HEAD(&c->clean_list);
355 INIT_LIST_HEAD(&c->very_dirty_list);
356 INIT_LIST_HEAD(&c->dirty_list);
357 INIT_LIST_HEAD(&c->erasable_list);
358 INIT_LIST_HEAD(&c->erasing_list);
359 INIT_LIST_HEAD(&c->erase_checking_list);
360 INIT_LIST_HEAD(&c->erase_pending_list);
361 INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
362 INIT_LIST_HEAD(&c->erase_complete_list);
363 INIT_LIST_HEAD(&c->free_list);
364 INIT_LIST_HEAD(&c->bad_list);
365 INIT_LIST_HEAD(&c->bad_used_list);
366 c->highest_ino = 1;
367 c->summary = NULL;
369 ret = jffs2_sum_init(c);
370 if (ret)
371 goto out_free;
373 if (jffs2_build_filesystem(c)) {
374 dbg_fsbuild("build_fs failed\n");
375 jffs2_free_ino_caches(c);
376 jffs2_free_raw_node_refs(c);
377 ret = -EIO;
378 goto out_free;
381 jffs2_calc_trigger_levels(c);
383 return 0;
385 out_free:
386 #ifndef __ECOS
387 if (jffs2_blocks_use_vmalloc(c))
388 vfree(c->blocks);
389 else
390 #endif
391 kfree(c->blocks);
393 return ret;