/*
 *  linux/fs/hfs/inode.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains inode-related functions which do not depend on
 * which scheme is being used to represent forks.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */
#include <linux/pagemap.h>
#include <linux/mpage.h>

#include "hfs_fs.h"
#include "btree.h"

static struct file_operations hfs_file_operations;
static struct inode_operations hfs_file_inode_operations;

/*================ Variable-like macros ================*/

#define HFS_VALID_MODE_BITS  (S_IFREG | S_IFDIR | S_IRWXUGO)

static int hfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfs_get_block, wbc);
}

static int hfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfs_get_block);
}

static int hfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfs_get_block,
				  &HFS_I(page->mapping->host)->phys_size);
}

static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfs_get_block);
}

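/*
 * Release a page belonging to one of the B-tree inodes: drop any
 * cached, unreferenced hfs_bnodes backed by this page and then try to
 * free its buffers.  Returns 0 if a node on the page is still in use.
 */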
static int hfs_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFS_EXT_CNID:
		tree = HFS_SB(sb)->ext_tree;
		break;
	case HFS_CAT_CNID:
		tree = HFS_SB(sb)->cat_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

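/*
 * hfs_get_blocks() wraps hfs_get_block() and fills in bh_result->b_size,
 * as required by the direct I/O path below.
 */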
static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
			  struct buffer_head *bh_result, int create)
{
	int ret;

	ret = hfs_get_block(inode, iblock, bh_result, create);
	if (!ret)
		bh_result->b_size = (1 << inode->i_blkbits);
	return ret;
}

static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfs_get_blocks, NULL);
}

static int hfs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfs_get_block);
}

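/*
 * hfs_btree_aops is used by the special extents and catalog tree inodes;
 * hfs_aops handles ordinary file data and resource forks.
 */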
struct address_space_operations hfs_btree_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfs_bmap,
	.releasepage	= hfs_releasepage,
};

struct address_space_operations hfs_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfs_bmap,
	.direct_IO	= hfs_direct_IO,
	.writepages	= hfs_writepages,
};

/*
 * hfs_new_inode
 */
struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	init_MUTEX(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
	inode->i_ino = HFS_SB(sb)->next_id++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blksize = HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	HFS_I(inode)->fs_blocks = 0;
	if (S_ISDIR(mode)) {
		inode->i_size = 2;
		HFS_SB(sb)->folder_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_dirs++;
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		inode->i_mode |= S_IRWXUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
	} else if (S_ISREG(mode)) {
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
		HFS_SB(sb)->file_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_files++;
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		inode->i_mode |= S_IRUGO|S_IXUGO;
		if (mode & S_IWUSR)
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
		HFS_I(inode)->phys_size = 0;
		HFS_I(inode)->alloc_blocks = 0;
		HFS_I(inode)->first_blocks = 0;
		HFS_I(inode)->cached_start = 0;
		HFS_I(inode)->cached_blocks = 0;
		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	}
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	sb->s_dirt = 1;

	return inode;
}

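/*
 * Update the folder/file counts kept in the MDB when an inode goes away
 * and truncate the fork of an unlinked regular file.
 */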
void hfs_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino);
	if (S_ISDIR(inode->i_mode)) {
		HFS_SB(sb)->folder_count--;
		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
			HFS_SB(sb)->root_dirs--;
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		sb->s_dirt = 1;
		return;
	}
	HFS_SB(sb)->file_count--;
	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
		HFS_SB(sb)->root_files--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfs_file_truncate(inode);
		}
	}
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	sb->s_dirt = 1;
}

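/*
 * Fill in the in-core fork information (first extents, sizes and clump
 * size) from an on-disk catalog record.
 */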
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
			 __be32 __log_size, __be32 phys_size, u32 clump_size)
{
	struct super_block *sb = inode->i_sb;
	u32 log_size = be32_to_cpu(__log_size);
	u16 count;
	int i;

	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
	for (count = 0, i = 0; i < 3; i++)
		count += be16_to_cpu(ext[i].count);
	HFS_I(inode)->first_blocks = count;

	inode->i_size = HFS_I(inode)->phys_size = log_size;
	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
				     HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
	if (!HFS_I(inode)->clump_blocks)
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}

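/*
 * hfs_iget_data carries the catalog key and record to the iget5_locked()
 * callbacks; hfs_test_inode() matches an inode against the CNID stored
 * in that record.
 */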
struct hfs_iget_data {
	struct hfs_cat_key *key;
	hfs_cat_rec *rec;
};

static int hfs_test_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	hfs_cat_rec *rec;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_DIR:
		return inode->i_ino == be32_to_cpu(rec->dir.DirID);
	case HFS_CDR_FIL:
		return inode->i_ino == be32_to_cpu(rec->file.FlNum);
	default:
		BUG();
		return 1;
	}
}

/*
 * hfs_read_inode
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	hfs_cat_rec *rec;

	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	init_MUTEX(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);

	/* Initialize the inode */
	inode->i_uid = hsb->s_uid;
	inode->i_gid = hsb->s_gid;
	inode->i_nlink = 1;
	inode->i_blksize = HFS_SB(inode->i_sb)->alloc_blksz;

	if (idata->key)
		HFS_I(inode)->cat_key = *idata->key;
	else
		HFS_I(inode)->flags |= HFS_FLG_RSRC;
	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_FIL:
		if (!HFS_IS_RSRC(inode)) {
			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
		} else {
			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
		}

		inode->i_ino = be32_to_cpu(rec->file.FlNum);
		inode->i_mode = S_IRUGO | S_IXUGO;
		if (!(rec->file.Flags & HFS_FIL_LOCK))
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~hsb->s_file_umask;
		inode->i_mode |= S_IFREG;
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->file.MdDat);
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		break;
	case HFS_CDR_DIR:
		inode->i_ino = be32_to_cpu(rec->dir.DirID);
		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
		HFS_I(inode)->fs_blocks = 0;
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->dir.MdDat);
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		break;
	default:
		make_bad_inode(inode);
	}
	return 0;
}

/*
 * __hfs_iget()
 *
 * Given the MDB for an HFS filesystem, a 'key' and an 'entry' in
 * the catalog B-tree and the 'type' of the desired file, return the
 * inode for that file/directory or NULL.  Note that 'type' indicates
 * whether we want the actual file or directory, or the corresponding
 * metadata (AppleDouble header file or CAP metadata file).
 */
struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
{
	struct hfs_iget_data data = { key, rec };
	struct inode *inode;
	u32 cnid;

	switch (rec->type) {
	case HFS_CDR_DIR:
		cnid = be32_to_cpu(rec->dir.DirID);
		break;
	case HFS_CDR_FIL:
		cnid = be32_to_cpu(rec->file.FlNum);
		break;
	default:
		return NULL;
	}
	inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
	if (inode && (inode->i_state & I_NEW))
		unlock_new_inode(inode);
	return inode;
}

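/*
 * Copy the in-core fork information back into an on-disk catalog record
 * (extent record plus logical and physical sizes).
 */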
void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
			  __be32 *log_size, __be32 *phys_size)
{
	memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));

	if (log_size)
		*log_size = cpu_to_be32(inode->i_size);
	if (phys_size)
		*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
					 HFS_SB(inode->i_sb)->alloc_blksz);
}

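/*
 * Write an inode back to the catalog tree.  The reserved CNIDs are
 * special: the extents and catalog B-trees are flushed with
 * hfs_btree_write() instead of through a catalog record.
 */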
int hfs_write_inode(struct inode *inode, int unused)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;

	dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino);
	hfs_ext_write_extent(inode);

	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
		switch (inode->i_ino) {
		case HFS_ROOT_CNID:
			break;
		case HFS_EXT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
			return 0;
		case HFS_CAT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
			return 0;
		default:
			BUG();
			return -EIO;
		}
	}

	if (HFS_IS_RSRC(inode))
		main_inode = HFS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	fd.search_key->cat = HFS_I(main_inode)->cat_key;
	if (hfs_brec_find(&fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		if (fd.entrylength < sizeof(struct hfs_cat_dir))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_dir));
		if (rec.type != HFS_CDR_DIR ||
		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
		}

		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
		rec.dir.Val = cpu_to_be16(inode->i_size - 2);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_dir));
	} else if (HFS_IS_RSRC(inode)) {
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		hfs_inode_write_fork(inode, rec.file.RExtRec,
				     &rec.file.RLgLen, &rec.file.RPyLen);
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	} else {
		if (fd.entrylength < sizeof(struct hfs_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		if (rec.type != HFS_CDR_FIL ||
		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
		}

		if (inode->i_mode & S_IWUSR)
			rec.file.Flags &= ~HFS_FIL_LOCK;
		else
			rec.file.Flags |= HFS_FIL_LOCK;
		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}

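/*
 * Looking up the magic name "rsrc" inside a file gives access to its
 * resource fork: a separate in-core inode is created (or reused) that
 * shares the on-disk catalog record with the data-fork inode.
 */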
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
				      struct nameidata *nd)
{
	struct inode *inode = NULL;
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	int res;

	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	fd.search_key->cat = HFS_I(dir)->cat_key;
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (!res) {
		struct hfs_iget_data idata = { NULL, &rec };
		hfs_read_inode(inode, &idata);
	}
	hfs_find_exit(&fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	HFS_I(inode)->rsrc_inode = dir;
	HFS_I(dir)->rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFS_SB(dir->i_sb)->rsrc_inodes);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

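/*
 * When a resource-fork inode is cleared, detach it from its data-fork
 * inode and drop the reference taken at lookup time.
 */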
void hfs_clear_inode(struct inode *inode)
{
	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFS_I(inode)->rsrc_inode);
	}
}

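/*
 * HFS stores no execute bits, so MAY_EXEC on a regular file is always
 * granted; everything else falls through to generic_permission().
 */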
static int hfs_permission(struct inode *inode, int mask,
			  struct nameidata *nd)
{
	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC)
		return 0;
	return generic_permission(inode, mask, NULL);
}

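/*
 * Count opens on the main (data-fork) inode; on the last release the
 * fork's allocation is trimmed back to its logical size via
 * hfs_file_truncate().
 */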
static int hfs_file_open(struct inode *inode, struct file *file)
{
	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_read(&file->f_count) != 1)
		return 0;
	atomic_inc(&HFS_I(inode)->opencnt);
	return 0;
}

static int hfs_file_release(struct inode *inode, struct file *file)
{
	//struct super_block *sb = inode->i_sb;

	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfs_file_truncate(inode);
		//if (inode->i_flags & S_DEAD) {
		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
		//	hfs_delete_inode(inode);
		//}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

/*
 * hfs_notify_change()
 *
 * Based very closely on fs/msdos/inode.c by Werner Almesberger
 *
 * This is the notify_change() field in the super_operations structure
 * for HFS file systems.  The purpose is to take the changes made to
 * an inode and apply them in a filesystem-dependent manner.  In this
 * case the process has a few tasks to do:
 *  1) prevent changes to the i_uid and i_gid fields.
 *  2) map file permissions to the closest allowable permissions
 *  3) Since multiple Linux files can share the same on-disk inode under
 *     HFS (for instance the data and resource forks of a file) a change
 *     to permissions must be applied to all other in-core inodes which
 *     correspond to the same HFS file.
 */
int hfs_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	int error;

	error = inode_change_ok(inode, attr); /* basic permission checks */
	if (error)
		return error;

	/* no uid/gid changes and limit which mode bits can be set */
	if (((attr->ia_valid & ATTR_UID) &&
	     (attr->ia_uid != hsb->s_uid)) ||
	    ((attr->ia_valid & ATTR_GID) &&
	     (attr->ia_gid != hsb->s_gid)) ||
	    ((attr->ia_valid & ATTR_MODE) &&
	     ((S_ISDIR(inode->i_mode) &&
	       (attr->ia_mode != inode->i_mode)) ||
	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
		return hsb->s_quiet ? 0 : error;
	}

	if (attr->ia_valid & ATTR_MODE) {
		/* Only the 'w' bits can ever change and only all together. */
		if (attr->ia_mode & S_IWUSR)
			attr->ia_mode = inode->i_mode | S_IWUGO;
		else
			attr->ia_mode = inode->i_mode & ~S_IWUGO;
		attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask : ~hsb->s_file_umask;
	}
	error = inode_setattr(inode, attr);
	if (error)
		return error;

	return 0;
}

static struct file_operations hfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
	.fsync		= file_fsync,
	.open		= hfs_file_open,
	.release	= hfs_file_release,
};

static struct inode_operations hfs_file_inode_operations = {
	.lookup		= hfs_file_lookup,
	.truncate	= hfs_file_truncate,
	.setattr	= hfs_inode_setattr,
	.permission	= hfs_permission,
	.setxattr	= hfs_setxattr,
	.getxattr	= hfs_getxattr,
	.listxattr	= hfs_listxattr,
};