/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>
typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);
const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= generic_file_fsync,
};
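/*
 * Drop a page previously obtained via dir_get_page(): undo the kmap()
 * and release the page cache reference.
 */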
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}
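/* Number of page-cache pages spanned by the directory, rounded up. */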
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
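/*
 * Finish a modification of @len bytes at @pos in a locked directory page:
 * commit the new contents with block_write_end(), grow i_size if the chunk
 * extends the directory, then either write the page out synchronously
 * (IS_DIRSYNC directories) or simply unlock it for later writeback.
 */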
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}
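/*
 * Read directory page @n through the page cache and map it with kmap().
 * On success the caller must release it with dir_put_page(); on failure
 * an ERR_PTR() is returned.
 */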
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page))
		kmap(page);
	return page;
}
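/* Directory entries are fixed size; step to the next slot. */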
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}
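/*
 * Walk the directory a page at a time, emitting every entry with a
 * non-zero inode number through the filldir callback.  f_pos encodes
 * the page number in the high bits and the byte offset within that
 * page in the low bits.
 */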
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	return 0;
}
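/*
 * Compare a user-supplied name of length @len against a fixed-width,
 * NUL-padded on-disk name of at most @maxlen bytes.
 */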
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}
/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}
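/*
 * Add the name in @dentry to the parent directory, pointing it at
 * @inode.  The directory is scanned for a free slot and grown by one
 * entry past i_size if none is found; returns -EEXIST if the name is
 * already present.
 */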
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err)
		goto out_unlock;
	memcpy(namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset(namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset(namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
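/*
 * Clear the on-disk inode number of @de in the (kmapped) directory
 * page @page, commit the change and release the page.
 */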
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	lock_page(page);
	err = minix_prepare_chunk(page, pos, len);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = 0;
		else
			de->inode = 0;
		err = dir_commit_chunk(page, pos, len);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}
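/*
 * Initialise a freshly created directory: write the "." and ".."
 * entries into its first page.
 */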
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}
/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = inode->i_ino;
		else
			de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}
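/*
 * Return the ".." entry of @dir, which is always the second slot of
 * its first page.  On success *p is set to the (kmapped) page, to be
 * released by the caller with dir_put_page().
 */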
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}
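/*
 * Look up the name in @dentry in its parent directory and return the
 * corresponding inode number, or 0 if no such entry exists.
 */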
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		struct address_space *mapping = page->mapping;
		struct inode *inode = mapping->host;
		struct minix_sb_info *sbi = minix_sb(inode->i_sb);

		if (sbi->s_version == MINIX_V3)
			res = ((minix3_dirent *) de)->inode;
		else
			res = de->inode;
		dir_put_page(page);
	}
	return res;
}