fs/jffs2/malloc.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"

/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
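
/*
 * Note (editorial): each fixed-size JFFS2 metadata structure gets its own
 * kmem_cache, so the frequent alloc/free cycles during mount scanning, GC
 * and writes avoid generic kmalloc overhead and get tighter slab packing.
 * Variable-sized objects (e.g. full dirents, which carry the name inline)
 * still go through kmalloc()/kfree() below.
 */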

int __init jffs2_create_slab_caches(void)
{
        full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
                                            sizeof(struct jffs2_full_dnode),
                                            0, 0, NULL);
        if (!full_dnode_slab)
                goto err;

        raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
                                            sizeof(struct jffs2_raw_dirent),
                                            0, 0, NULL);
        if (!raw_dirent_slab)
                goto err;

        raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
                                           sizeof(struct jffs2_raw_inode),
                                           0, 0, NULL);
        if (!raw_inode_slab)
                goto err;

        tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
                                                sizeof(struct jffs2_tmp_dnode_info),
                                                0, 0, NULL);
        if (!tmp_dnode_info_slab)
                goto err;

        raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
                                              sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
                                              0, 0, NULL);
        if (!raw_node_ref_slab)
                goto err;

        node_frag_slab = kmem_cache_create("jffs2_node_frag",
                                           sizeof(struct jffs2_node_frag),
                                           0, 0, NULL);
        if (!node_frag_slab)
                goto err;

        inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
                                             sizeof(struct jffs2_inode_cache),
                                             0, 0, NULL);
        if (!inode_cache_slab)
                goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
        xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
                                              sizeof(struct jffs2_xattr_datum),
                                              0, 0, NULL);
        if (!xattr_datum_cache)
                goto err;

        xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
                                            sizeof(struct jffs2_xattr_ref),
                                            0, 0, NULL);
        if (!xattr_ref_cache)
                goto err;
#endif

        return 0;
 err:
        jffs2_destroy_slab_caches();
        return -ENOMEM;
}
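
/*
 * A minimal usage sketch (assumption: the callers live in the module
 * init/exit paths in fs/jffs2/super.c, which is how these helpers are
 * normally wired up):
 *
 *	static int __init init_jffs2_fs(void)
 *	{
 *		int ret = jffs2_create_slab_caches();
 *		if (ret)
 *			return ret;
 *		ret = register_filesystem(&jffs2_fs_type);
 *		if (ret)
 *			jffs2_destroy_slab_caches();
 *		return ret;
 *	}
 *
 * On failure, jffs2_create_slab_caches() already tears down whatever it
 * managed to create, so the caller only needs the single error check.
 */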

void jffs2_destroy_slab_caches(void)
{
        if (full_dnode_slab)
                kmem_cache_destroy(full_dnode_slab);
        if (raw_dirent_slab)
                kmem_cache_destroy(raw_dirent_slab);
        if (raw_inode_slab)
                kmem_cache_destroy(raw_inode_slab);
        if (tmp_dnode_info_slab)
                kmem_cache_destroy(tmp_dnode_info_slab);
        if (raw_node_ref_slab)
                kmem_cache_destroy(raw_node_ref_slab);
        if (node_frag_slab)
                kmem_cache_destroy(node_frag_slab);
        if (inode_cache_slab)
                kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
        if (xattr_datum_cache)
                kmem_cache_destroy(xattr_datum_cache);
        if (xattr_ref_cache)
                kmem_cache_destroy(xattr_ref_cache);
#endif
}

struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
        struct jffs2_full_dirent *ret;
        ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
        dbg_memalloc("%p\n", x);
        kfree(x);
}

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
        struct jffs2_full_dnode *ret;
        ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
        struct jffs2_raw_dirent *ret;
        ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
        struct jffs2_raw_inode *ret;
        ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
        struct jffs2_tmp_dnode_info *ret;
        ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(tmp_dnode_info_slab, x);
}
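
/*
 * Raw node refs are not allocated singly: the "jffs2_refblock" cache hands
 * out arrays of REFS_PER_BLOCK + 1 entries.  The first REFS_PER_BLOCK slots
 * are marked REF_EMPTY_NODE (free for use); the final slot is a
 * REF_LINK_NODE sentinel whose next_in_ino pointer chains this block to the
 * next one, so an eraseblock's refs form a linked list of arrays.
 */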
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
        struct jffs2_raw_node_ref *ret;

        ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
        if (ret) {
                int i = 0;
                for (i=0; i < REFS_PER_BLOCK; i++) {
                        ret[i].flash_offset = REF_EMPTY_NODE;
                        ret[i].next_in_ino = NULL;
                }
                ret[i].flash_offset = REF_LINK_NODE;
                ret[i].next_in_ino = NULL;
        }
        return ret;
}
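
/*
 * Make sure at least 'nr' empty refs are available on 'jeb' before the
 * caller starts accounting for new nodes in it.  The loop walks the existing
 * chain from jeb->last_node, steps over REF_LINK_NODE sentinels into the
 * next block, and allocates fresh refblocks whenever it runs off the end.
 */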
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
                                 struct jffs2_eraseblock *jeb, int nr)
{
        struct jffs2_raw_node_ref **p, *ref;
        int i = nr;

        dbg_memalloc("%d\n", nr);

        p = &jeb->last_node;
        ref = *p;

        dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

        /* If jeb->last_node is really a valid node then skip over it */
        if (ref && ref->flash_offset != REF_EMPTY_NODE)
                ref++;

        while (i) {
                if (!ref) {
                        dbg_memalloc("Allocating new refblock linked from %p\n", p);
                        ref = *p = jffs2_alloc_refblock();
                        if (!ref)
                                return -ENOMEM;
                }
                if (ref->flash_offset == REF_LINK_NODE) {
                        p = &ref->next_in_ino;
                        ref = *p;
                        continue;
                }
                i--;
                ref++;
        }
        jeb->allocated_refs = nr;

        dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
                     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
                     jeb->last_node->next_in_ino);

        return 0;
}
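
/*
 * Typical call pattern for jffs2_prealloc_raw_node_refs() (an assumption
 * about the callers, not something defined in this file): the write paths
 * reserve refs up front, e.g.
 *
 *	if (jffs2_prealloc_raw_node_refs(c, jeb, 1))
 *		return -ENOMEM;
 *
 * so that attaching the ref after the flash write cannot fail on a memory
 * allocation.
 */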

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
        struct jffs2_node_frag *ret;
        ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
        struct jffs2_inode_cache *ret;
        ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(inode_cache_slab, x);
}

#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
        struct jffs2_xattr_datum *xd;
        xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", xd);
        /* kmem_cache_alloc() can fail; don't memset a NULL pointer */
        if (!xd)
                return NULL;

        memset(xd, 0, sizeof(struct jffs2_xattr_datum));
        xd->class = RAWNODE_CLASS_XATTR_DATUM;
        xd->node = (void *)xd;
        INIT_LIST_HEAD(&xd->xindex);
        return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
        dbg_memalloc("%p\n", xd);
        kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
        struct jffs2_xattr_ref *ref;
        ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", ref);
        /* As above: bail out before touching a failed allocation */
        if (!ref)
                return NULL;

        memset(ref, 0, sizeof(struct jffs2_xattr_ref));
        ref->class = RAWNODE_CLASS_XATTR_REF;
        ref->node = (void *)ref;
        return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
        dbg_memalloc("%p\n", ref);
        kmem_cache_free(xattr_ref_cache, ref);
}
#endif