/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= simple_fsync,
};

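/*
 * Drop a directory page mapped by dir_get_page(): undo the kmap()
 * and release the page cache reference.
 */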
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}

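/*
 * Number of page cache pages spanned by the directory, i.e. i_size
 * rounded up to a whole number of pages.
 */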
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

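/*
 * Commit a chunk of directory data the caller has already copied into
 * a locked page: run block_write_end(), grow i_size if the chunk
 * extends the directory, and either write the page out synchronously
 * (for IS_DIRSYNC directories) or simply unlock it.
 */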
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

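/*
 * Read directory page 'n' through the page cache and kmap() it.
 * Returns an ERR_PTR on failure; on success the caller must release
 * the page with dir_put_page().
 */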
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}

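/*
 * Step to the next directory entry; entries are a fixed
 * sbi->s_dirsize bytes apart.
 */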
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

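/*
 * readdir: walk the directory page by page, handing every in-use
 * entry (inode number != 0) to the filldir callback.  f_pos encodes
 * the page number and the byte offset within that page.
 */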
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	return 0;
}

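/*
 * Compare a name of length 'len' against a fixed-width on-disk name
 * field of at most 'maxlen' bytes.  Shorter on-disk names are NUL
 * padded, so a live byte at buffer[len] means the names differ.
 */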
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}

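/*
 * Add an entry for 'dentry' pointing at 'inode' to the parent
 * directory.  Scans for a free slot (inode == 0) or for the end of
 * the directory and writes the new entry there; returns -EEXIST if
 * the name is already present.
 */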
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

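/*
 * Delete an existing directory entry by clearing its inode number and
 * committing the change.  The caller passes the entry and the kmapped
 * page it lives in (typically as obtained from minix_find_entry());
 * the page is released here.
 */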
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = (struct inode*)mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	unsigned len = minix_sb(inode->i_sb)->s_dirsize;
	int err;

	lock_page(page);
	err = __minix_write_begin(NULL, mapping, pos, len,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		de->inode = 0;
		err = dir_commit_chunk(page, pos, len);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}

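/*
 * Write the "." and ".." entries into the first page of a new
 * directory.  'inode' is the new directory, 'dir' its parent.
 */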
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = __minix_write_begin(NULL, mapping, 0, 2 * sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}

/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

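/*
 * Return the ".." entry of a directory: the second fixed-size entry
 * in page 0.  On success *p is set to the kmapped page, which the
 * caller must release with dir_put_page().
 */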
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}

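/*
 * Look up 'dentry' in its parent directory and return the inode
 * number found there, or 0 if no entry with that name exists.
 */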
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		res = de->inode;
		dir_put_page(page);
	}
	return res;
}