fs/jffs2/build.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
		struct jffs2_inode_cache *, struct jffs2_full_dirent **);
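/*
 * Helpers for walking every entry in the inocache hash table:
 * first_inode_chain() finds the first non-empty hash bucket at or after
 * index *i, next_inode() follows the chain within a bucket before moving
 * on to the next bucket, and for_each_inode() wraps the two into a single
 * iteration macro used by the build passes below.
 */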
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
	for (; *i < c->inocache_hashsize; (*i)++) {
		if (c->inocache_list[*i])
			return c->inocache_list[*i];
	}
	return NULL;
}
static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
	/* More in this chain? */
	if (ic->next)
		return ic->next;
	(*i)++;
	return first_inode_chain(i, c);
}
#define for_each_inode(i, c, ic)			\
	for (i = 0, ic = first_inode_chain(&i, (c));	\
	     ic;					\
	     ic = next_inode(&i, ic, (c)))
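/*
 * Note on pino_nlink, as used by jffs2_build_inode_pass1() below: for a
 * directory it holds the parent directory's inode number (a directory has
 * exactly one parent), while for any other inode it is a plain link count
 * that is simply incremented for each dirent referencing it.
 */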
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
				    struct jffs2_inode_cache *ic)
{
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("building directory inode #%u\n", ic->ino);

	/* For each child, increase nlink */
	for(fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		if (!fd->ino)
			continue;

		/* we can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				    fd->name, fd->ino, ic->ino);
			jffs2_mark_node_obsolete(c, fd->raw);
			continue;
		}

		if (fd->type == DT_DIR) {
			if (child_ic->pino_nlink) {
				JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
					    fd->name, fd->ino, ic->ino);
				/* TODO: What do we do about it? */
			} else {
				child_ic->pino_nlink = ic->ino;
			}
		} else
			child_ic->pino_nlink++;

		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
		/* Can't free scan_dents so far. We might need them in pass 2 */
	}
}
/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	struct jffs2_full_dirent *dead_fds = NULL;

	dbg_fsbuild("build FS data structures\n");

	/* First, scan the medium and build all the inode caches with
	   lists of physical nodes */

	c->flags |= JFFS2_SB_FLAG_SCANNING;
	ret = jffs2_scan_medium(c);
	c->flags &= ~JFFS2_SB_FLAG_SCANNING;
	if (ret)
		goto exit;

	dbg_fsbuild("scanned flash completely\n");
	jffs2_dbg_dump_block_lists_nolock(c);

	dbg_fsbuild("pass 1 starting\n");
	c->flags |= JFFS2_SB_FLAG_BUILDING;
	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic);
			cond_resched();
		}
	}

	dbg_fsbuild("pass 1 complete\n");
	/* Next, scan for inodes with nlink == 0 and remove them. If
	   they were directories, then decrement the nlink of their
	   children too, and repeat the scan. As that's going to be
	   a fairly uncommon occurrence, it's not so evil to do it this
	   way. Recursion bad. */
	dbg_fsbuild("pass 2 starting\n");

	for_each_inode(i, c, ic) {
		if (ic->pino_nlink)
			continue;

		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	dbg_fsbuild("pass 2a starting\n");
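	/*
	 * dead_fds acts as a simple worklist: when an unlinked directory is
	 * removed, the dirents for its children are pushed onto this list
	 * instead of being recursed into.  Draining the list here keeps the
	 * stack depth bounded however deep the dead directory tree is.
	 */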
	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);

		if (ic)
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		jffs2_free_full_dirent(fd);
	}

	dbg_fsbuild("pass 2a complete\n");
	dbg_fsbuild("freeing temporary data structures\n");
	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		while(ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	jffs2_build_xattr_subsystem(c);
	c->flags &= ~JFFS2_SB_FLAG_BUILDING;

	dbg_fsbuild("FS build complete\n");

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;
 exit:
	if (ret) {
		for_each_inode(i, c, ic) {
			while(ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
		jffs2_clear_xattr_subsystem(c);
	}

	return ret;
}
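/*
 * Tear down a single inode that turned out to have no remaining links:
 * every raw node belonging to it is marked obsolete, and if it was a
 * directory, each child dirent either has its nlink dropped or is queued
 * on the caller's dead_fds list for the same treatment.
 */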
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
					struct jffs2_inode_cache *ic,
					struct jffs2_full_dirent **dead_fds)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);

	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}
	if (ic->scan_dents) {
		int whinged = 0;
		dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);

		while(ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
				jffs2_free_full_dirent(fd);
				continue;
			}

			if (!whinged)
				whinged = 1;

			dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
						fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			   dead_fds list to be cleaned up later. Else just free the fd */

			if (fd->type == DT_DIR)
				child_ic->pino_nlink = 0;
			else
				child_ic->pino_nlink--;

			if (!child_ic->pino_nlink) {
				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
					    fd->ino, fd->name);
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					    fd->ino, fd->name, child_ic->pino_nlink);
				jffs2_free_full_dirent(fd);
			}
		}
	}
	/*
	   We don't delete the inocache from the hash list and free it yet.
	   The erase code will do that, when all the nodes are completely gone.
	*/
}
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
	uint32_t size;

	/* Deletion should almost _always_ be allowed. We're fairly
	   buggered once we stop allowing people to delete stuff
	   because there's not enough free space... */
	c->resv_blocks_deletion = 2;

	/* Be conservative about how much space we need before we allow writes.
	   On top of that which is required for deletia, require an extra 2%
	   of the medium to be available, for overhead caused by nodes being
	   split across blocks, etc. */

	size = c->flash_size / 50; /* 2% of flash size */
	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
	size += c->sector_size - 1; /* ... and round up */

	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
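	/*
	 * Illustrative example (hypothetical figures, not from the code):
	 * on a 64 MiB flash with 64 KiB eraseblocks, size comes to roughly
	 * 64 MiB/50 + 1024*100 ~= 1.38 MiB, which rounds up to 23 blocks,
	 * giving resv_blocks_write = 2 + 23 = 25 blocks (about 1.6 MiB).
	 */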
	/* When do we let the GC thread run in the background */

	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

	/* When do we allow garbage collection to merge nodes to make
	   long-term progress at the expense of short-term space exhaustion? */
	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

	/* When do we allow garbage collection to eat from bad blocks rather
	   than actually making progress? */
	c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;

	/* What number of 'very dirty' eraseblocks do we allow before we
	   trigger the GC thread even if we don't _need_ the space. When we
	   can't mark nodes obsolete on the medium, the old dirty nodes cause
	   performance problems because we have to inspect and discard them. */
	c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
	if (jffs2_can_mark_obsolete(c))
		c->vdirty_blocks_gctrigger *= 10;

	/* If there's less than this amount of dirty space, don't bother
	   trying to GC to make more space. It'll be a fruitless task */
	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
	dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
		    c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
	dbg_fsbuild("Blocks required to allow deletion:    %d (%d KiB)\n",
		    c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
	dbg_fsbuild("Blocks required to allow writes:      %d (%d KiB)\n",
		    c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
	dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
		    c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
	dbg_fsbuild("Blocks required to allow GC merges:   %d (%d KiB)\n",
		    c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
	dbg_fsbuild("Blocks required to GC bad blocks:     %d (%d KiB)\n",
		    c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
	dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
		    c->nospc_dirty_size);
	dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
		    c->vdirty_blocks_gctrigger);
}
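/*
 * Mount-time entry point: allocate and initialise the per-eraseblock
 * array and the block lists, set up summary support, build the in-core
 * filesystem from the medium, and finally compute the trigger levels
 * above.
 */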
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	int size;

	c->free_size = c->flash_size;
	c->nr_blocks = c->flash_size / c->sector_size;
	size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
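	/*
	 * The eraseblock array scales with the number of blocks, so it can
	 * get large; when jffs2_blocks_use_vmalloc() says so, it is placed
	 * in vmalloc space rather than requesting one big physically
	 * contiguous kmalloc() buffer.
	 */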
#ifndef __ECOS
	if (jffs2_blocks_use_vmalloc(c))
		c->blocks = vmalloc(size);
	else
#endif
		c->blocks = kmalloc(size, GFP_KERNEL);
	if (!c->blocks)
		return -ENOMEM;

	memset(c->blocks, 0, size);
	for (i=0; i<c->nr_blocks; i++) {
		INIT_LIST_HEAD(&c->blocks[i].list);
		c->blocks[i].offset = i * c->sector_size;
		c->blocks[i].free_size = c->sector_size;
	}
	INIT_LIST_HEAD(&c->clean_list);
	INIT_LIST_HEAD(&c->very_dirty_list);
	INIT_LIST_HEAD(&c->dirty_list);
	INIT_LIST_HEAD(&c->erasable_list);
	INIT_LIST_HEAD(&c->erasing_list);
	INIT_LIST_HEAD(&c->erase_checking_list);
	INIT_LIST_HEAD(&c->erase_pending_list);
	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
	INIT_LIST_HEAD(&c->erase_complete_list);
	INIT_LIST_HEAD(&c->free_list);
	INIT_LIST_HEAD(&c->bad_list);
	INIT_LIST_HEAD(&c->bad_used_list);
	c->highest_ino = 1;
	c->summary = NULL;
	ret = jffs2_sum_init(c);
	if (ret)
		goto out_free;

	if (jffs2_build_filesystem(c)) {
		dbg_fsbuild("build_fs failed\n");
		jffs2_free_ino_caches(c);
		jffs2_free_raw_node_refs(c);
		ret = -EIO;
		goto out_free;
	}

	jffs2_calc_trigger_levels(c);

	return 0;

 out_free:
#ifndef __ECOS
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
#endif
		kfree(c->blocks);

	return ret;
}