/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 */
/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
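/*
 * (EXT2_XATTR_PAD is 1 << EXT2_XATTR_PAD_BITS, i.e. four bytes, defined
 * in xattr.h, so entry descriptors and values are 4-byte aligned.)
 */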
#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
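/*
 * HDR() and FIRST_ENTRY() give typed views into an xattr block: the
 * header sits at offset 0 and the first entry descriptor directly
 * follows it. The entry list is terminated by four null bytes, which
 * is exactly what IS_LAST_ENTRY() tests for.
 */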
#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);
static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};
const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};
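/*
 * Map the name index stored in an on-disk entry descriptor to its
 * handler; indices that are out of range or compiled out yield NULL,
 * so callers simply skip attributes they do not understand.
 */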
static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}
/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
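/*
 * A typical caller makes two passes (a sketch; "foo" is just an example
 * attribute name):
 *
 *	len = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0);
 *	...allocate a buffer buf of len bytes...
 *	len = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo", buf, len);
 */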
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
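/*
 * Each name is emitted as "prefix" + "name" + '\0', back to back, e.g.
 * "user.foo\0trusted.bar\0", matching the listxattr(2) convention.
 */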
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}
/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
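/*
 * EXT_ATTR is a COMPAT feature, so flipping it on a mounted filesystem
 * is safe: implementations that do not know the flag can still mount
 * the filesystem read-write and will simply leave attribute blocks alone.
 */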
/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
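		/*
		 * Entries are sorted by name index, then name length, then
		 * name, so the scan below can stop at the first entry that
		 * compares >= the target; not_found == 0 means exact match.
		 */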
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name, name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char *)last - (char *)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
						    hash, bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}
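	/*
	 * Note that a shared block is never modified in place: the cloning
	 * path above builds a private copy, and ext2_xattr_set2() later
	 * drops the old block's reference count.
	 */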

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char *)here + size,
				(char *)last - (char *)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
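/*
 * On entry, old_bh is the inode's current xattr block (NULL if it had
 * none) and header holds the desired new contents; a NULL header means
 * all attributes are gone and any existing block should be released.
 */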
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					  le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
		}
		ext2_xattr_update_super_block(sb);

		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete_block(ext2_mb_cache,
						    hash, old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				  le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}
/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
					    hash, bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		ea_bdebug(bh, "refcount now=%d",
			  le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}
/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
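/*
 * -EBUSY from mb_cache_entry_create() only means an entry for this
 * block already exists; that is not an error here, so it is mapped
 * to 0 below.
 */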
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}
/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}
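/*
 * Because entries are kept sorted (see the layout comment at the top of
 * this file), the single linear pass above suffices to decide equality.
 */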
/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_mb_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ext2_mb_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ext2_mb_cache, ce);
				mb_cache_entry_put(ext2_mb_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ext2_mb_cache, ce);
	}
	return NULL;
}
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			  EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
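/*
 * In other words, the entry hash is a 32-bit rotate-and-xor: each name
 * byte is folded in with a 5-bit rotation, then each 4-byte word of the
 * padded value with a 16-bit rotation.
 */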
#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}
#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10
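/*
 * 1 << HASH_BUCKET_BITS = 1024 hash buckets for the per-superblock
 * mbcache; blocks are keyed by the h_hash value computed above.
 */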
struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}