/*
 * linux/fs/hfs/inode.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains inode-related functions which do not depend on
 * which scheme is being used to represent forks.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/mpage.h>

#include "hfs_fs.h"
#include "btree.h"

static const struct file_operations hfs_file_operations;
static const struct inode_operations hfs_file_inode_operations;

/*================ Variable-like macros ================*/

#define HFS_VALID_MODE_BITS  (S_IFREG | S_IFDIR | S_IRWXUGO)

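/*
 * Address space operations: thin wrappers that plug hfs_get_block() into
 * the generic page cache helpers.
 */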
static int hfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfs_get_block, wbc);
}

static int hfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfs_get_block);
}

static int hfs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfs_get_block,
				  &HFS_I(page->mapping->host)->phys_size);
}

static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfs_get_block);
}

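/*
 * Before a page of the extents or catalog B-tree inode can be released,
 * drop any unreferenced B-tree nodes cached for it; refuse the release
 * while a node is still in use.
 */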
static int hfs_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFS_EXT_CNID:
		tree = HFS_SB(sb)->ext_tree;
		break;
	case HFS_CAT_CNID:
		tree = HFS_SB(sb)->cat_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfs_get_block, NULL);
}

static int hfs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfs_get_block);
}

const struct address_space_operations hfs_btree_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfs_bmap,
	.releasepage	= hfs_releasepage,
};

const struct address_space_operations hfs_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfs_bmap,
	.direct_IO	= hfs_direct_IO,
	.writepages	= hfs_writepages,
};

/*
 * hfs_new_inode
 */
struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	init_MUTEX(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
	inode->i_ino = HFS_SB(sb)->next_id++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	HFS_I(inode)->fs_blocks = 0;
	if (S_ISDIR(mode)) {
		inode->i_size = 2;
		HFS_SB(sb)->folder_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_dirs++;
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		inode->i_mode |= S_IRWXUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
	} else if (S_ISREG(mode)) {
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
		HFS_SB(sb)->file_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_files++;
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		inode->i_mode |= S_IRUGO|S_IXUGO;
		if (mode & S_IWUSR)
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
		HFS_I(inode)->phys_size = 0;
		HFS_I(inode)->alloc_blocks = 0;
		HFS_I(inode)->first_blocks = 0;
		HFS_I(inode)->cached_start = 0;
		HFS_I(inode)->cached_blocks = 0;
		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	}
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	sb->s_dirt = 1;

	return inode;
}

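/*
 * Adjust the MDB folder/file counters for a deleted catalog entry and,
 * for a regular file with no links left, truncate its data.
 */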
void hfs_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino);
	if (S_ISDIR(inode->i_mode)) {
		HFS_SB(sb)->folder_count--;
		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
			HFS_SB(sb)->root_dirs--;
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		sb->s_dirt = 1;
		return;
	}
	HFS_SB(sb)->file_count--;
	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
		HFS_SB(sb)->root_files--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfs_file_truncate(inode);
		}
	}
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	sb->s_dirt = 1;
}

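/*
 * Fill in the in-core fork data (first extents, block counts, sizes and
 * clump size) from the fork description in a catalog record.
 */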
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
			 __be32 __log_size, __be32 phys_size, u32 clump_size)
{
	struct super_block *sb = inode->i_sb;
	u32 log_size = be32_to_cpu(__log_size);
	u16 count;
	int i;

	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
	for (count = 0, i = 0; i < 3; i++)
		count += be16_to_cpu(ext[i].count);
	HFS_I(inode)->first_blocks = count;

	inode->i_size = HFS_I(inode)->phys_size = log_size;
	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
			HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
	if (!HFS_I(inode)->clump_blocks)
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}

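/*
 * Callback data for iget5_locked(): hfs_test_inode() matches an in-core
 * inode against the CNID in a catalog record, hfs_read_inode() sets up a
 * freshly allocated inode from that record.
 */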
struct hfs_iget_data {
	struct hfs_cat_key *key;
	hfs_cat_rec *rec;
};

static int hfs_test_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	hfs_cat_rec *rec;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_DIR:
		return inode->i_ino == be32_to_cpu(rec->dir.DirID);
	case HFS_CDR_FIL:
		return inode->i_ino == be32_to_cpu(rec->file.FlNum);
	default:
		BUG();
		return 1;
	}
}

/*
 * hfs_read_inode
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	hfs_cat_rec *rec;

	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	init_MUTEX(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);

	/* Initialize the inode */
	inode->i_uid = hsb->s_uid;
	inode->i_gid = hsb->s_gid;
	inode->i_nlink = 1;

	if (idata->key)
		HFS_I(inode)->cat_key = *idata->key;
	else
		HFS_I(inode)->flags |= HFS_FLG_RSRC;
	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_FIL:
		if (!HFS_IS_RSRC(inode)) {
			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
		} else {
			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
		}

		inode->i_ino = be32_to_cpu(rec->file.FlNum);
		inode->i_mode = S_IRUGO | S_IXUGO;
		if (!(rec->file.Flags & HFS_FIL_LOCK))
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~hsb->s_file_umask;
		inode->i_mode |= S_IFREG;
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->file.MdDat);
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		break;
	case HFS_CDR_DIR:
		inode->i_ino = be32_to_cpu(rec->dir.DirID);
		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
		HFS_I(inode)->fs_blocks = 0;
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->dir.MdDat);
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		break;
	default:
		make_bad_inode(inode);
	}
	return 0;
}

/*
 * __hfs_iget()
 *
 * Given the MDB for an HFS filesystem, a 'key' and an 'entry' in the
 * catalog B-tree and the 'type' of the desired file, return the inode
 * for that file/directory or NULL.  Note that 'type' indicates whether
 * we want the actual file or directory, or the corresponding metadata
 * (AppleDouble header file or CAP metadata file).
 */
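/*
 * Typical use (sketch; roughly what hfs_lookup() in dir.c does): look the
 * name up in the catalog tree, then hand the resulting key and record to
 * hfs_iget():
 *
 *	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
 *	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
 *	res = hfs_brec_read(&fd, &rec, sizeof(rec));
 *	if (!res)
 *		inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec);
 *	hfs_find_exit(&fd);
 */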
struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
{
	struct hfs_iget_data data = { key, rec };
	struct inode *inode;
	u32 cnid;

	switch (rec->type) {
	case HFS_CDR_DIR:
		cnid = be32_to_cpu(rec->dir.DirID);
		break;
	case HFS_CDR_FIL:
		cnid = be32_to_cpu(rec->file.FlNum);
		break;
	default:
		return NULL;
	}
	inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
	if (inode && (inode->i_state & I_NEW))
		unlock_new_inode(inode);
	return inode;
}

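/*
 * Copy the in-core fork data back into an on-disk extent record and the
 * logical/physical size fields of a catalog record.
 */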
void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
			  __be32 *log_size, __be32 *phys_size)
{
	memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));

	if (log_size)
		*log_size = cpu_to_be32(inode->i_size);
	if (phys_size)
		*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
				HFS_SB(inode->i_sb)->alloc_blksz);
}

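/*
 * Write an inode's metadata back into its catalog record.  The special
 * B-tree inodes are handled by hfs_btree_write(); a resource-fork inode
 * updates the resource fork fields of the record it shares with its
 * data-fork inode.
 */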
int hfs_write_inode(struct inode *inode, int unused)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;

	dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino);
	hfs_ext_write_extent(inode);

	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
		switch (inode->i_ino) {
		case HFS_ROOT_CNID:
			break;
		case HFS_EXT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
			return 0;
		case HFS_CAT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
			return 0;
		default:
			BUG();
			return -EIO;
		}
	}

	if (HFS_IS_RSRC(inode))
		main_inode = HFS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	fd.search_key->cat = HFS_I(main_inode)->cat_key;
	if (hfs_brec_find(&fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		if (fd.entrylength < sizeof(struct hfs_cat_dir))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			   sizeof(struct hfs_cat_dir));
		if (rec.type != HFS_CDR_DIR ||
		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
		}

		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
		rec.dir.Val = cpu_to_be16(inode->i_size - 2);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
			    sizeof(struct hfs_cat_dir));
	} else if (HFS_IS_RSRC(inode)) {
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		hfs_inode_write_fork(inode, rec.file.RExtRec,
				     &rec.file.RLgLen, &rec.file.RPyLen);
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	} else {
		if (fd.entrylength < sizeof(struct hfs_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			   sizeof(struct hfs_cat_file));
		if (rec.type != HFS_CDR_FIL ||
		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
		}

		if (inode->i_mode & S_IWUSR)
			rec.file.Flags &= ~HFS_FIL_LOCK;
		else
			rec.file.Flags |= HFS_FIL_LOCK;
		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}

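/*
 * Lookup of the magic "rsrc" name inside a file: build (or reuse) the
 * in-core inode that represents the file's resource fork.
 */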
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
				      struct nameidata *nd)
{
	struct inode *inode = NULL;
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	int res;

	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	fd.search_key->cat = HFS_I(dir)->cat_key;
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (!res) {
		struct hfs_iget_data idata = { NULL, &rec };
		hfs_read_inode(inode, &idata);
	}
	hfs_find_exit(&fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	HFS_I(inode)->rsrc_inode = dir;
	HFS_I(dir)->rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFS_SB(dir->i_sb)->rsrc_inodes);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

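/*
 * When a resource-fork inode is evicted, detach it from its data-fork
 * inode and drop the reference taken on that inode at lookup time.
 */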
void hfs_clear_inode(struct inode *inode)
{
	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFS_I(inode)->rsrc_inode);
	}
}

static int hfs_permission(struct inode *inode, int mask,
			  struct nameidata *nd)
{
	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC)
		return 0;
	return generic_permission(inode, mask, NULL);
}

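/*
 * Open/release track how many times a file's forks are open so that the
 * last release can trim any extra allocated blocks via hfs_file_truncate().
 */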
static int hfs_file_open(struct inode *inode, struct file *file)
{
	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_read(&file->f_count) != 1)
		return 0;
	atomic_inc(&HFS_I(inode)->opencnt);
	return 0;
}

static int hfs_file_release(struct inode *inode, struct file *file)
{
	//struct super_block *sb = inode->i_sb;

	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfs_file_truncate(inode);
		//if (inode->i_flags & S_DEAD) {
		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
		//	hfs_delete_inode(inode);
		//}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

/*
 * hfs_notify_change()
 *
 * Based very closely on fs/msdos/inode.c by Werner Almesberger
 *
 * This is the notify_change() field in the super_operations structure
 * for HFS file systems.  The purpose is to take the changes made to
 * an inode and apply them in a filesystem-dependent manner.  In this
 * case the process has a few tasks to do:
 *  1) prevent changes to the i_uid and i_gid fields.
 *  2) map file permissions to the closest allowable permissions
 *  3) Since multiple Linux files can share the same on-disk inode under
 *     HFS (for instance the data and resource forks of a file) a change
 *     to permissions must be applied to all other in-core inodes which
 *     correspond to the same HFS file.
 */
int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
{
	struct inode *inode = dentry->d_inode;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	int error;

	error = inode_change_ok(inode, attr); /* basic permission checks */
	if (error)
		return error;

	/* no uid/gid changes and limit which mode bits can be set */
	if (((attr->ia_valid & ATTR_UID) &&
	     (attr->ia_uid != hsb->s_uid)) ||
	    ((attr->ia_valid & ATTR_GID) &&
	     (attr->ia_gid != hsb->s_gid)) ||
	    ((attr->ia_valid & ATTR_MODE) &&
	     ((S_ISDIR(inode->i_mode) &&
	       (attr->ia_mode != inode->i_mode)) ||
	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
		return hsb->s_quiet ? 0 : error;
	}

	if (attr->ia_valid & ATTR_MODE) {
		/* Only the 'w' bits can ever change and only all together. */
		if (attr->ia_mode & S_IWUSR)
			attr->ia_mode = inode->i_mode | S_IWUGO;
		else
			attr->ia_mode = inode->i_mode & ~S_IWUGO;
		attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask : ~hsb->s_file_umask;
	}
	error = inode_setattr(inode, attr);
	if (error)
		return error;

	return 0;
}

static const struct file_operations hfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
	.fsync		= file_fsync,
	.open		= hfs_file_open,
	.release	= hfs_file_release,
};

static const struct inode_operations hfs_file_inode_operations = {
	.lookup		= hfs_file_lookup,
	.truncate	= hfs_file_truncate,
	.setattr	= hfs_inode_setattr,
	.permission	= hfs_permission,
	.setxattr	= hfs_setxattr,
	.getxattr	= hfs_getxattr,
	.listxattr	= hfs_listxattr,
};