/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp:
 * @er:
 * @size:
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
                        unsigned int *size)
{
        *size = GFS2_EAREQ_SIZE_STUFFED(er);
        if (*size <= sdp->sd_jbsize)
                return 1;

        *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

        return 0;
}

static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
        unsigned int size;

        if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, er, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev, void *private);

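/**
 * ea_foreach_i - calls a function for each EA header in a single EA block
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea_call: the function to call for each EA header
 * @data: opaque data passed through to @ea_call
 *
 * Returns: zero, the first non-zero value returned by @ea_call, or -EIO
 *          if the block fails its consistency checks
 */
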
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
                                                  bh->b_data + bh->b_size))
                        goto fail;
                if (!GFS2_EATYPE_VALID(ea->ea_type))
                        goto fail;

                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

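/**
 * ea_foreach - calls a function for each EA header of an inode
 * @ip: the inode
 * @ea_call: the function to call for each EA header
 * @data: opaque data passed through to @ea_call
 *
 * Handles both the direct case (a single EA block) and the indirect case,
 * where i_eattr points to a block of pointers to EA blocks.
 *
 * Returns: errno, or the first non-zero value returned by @ea_call
 */
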
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        __be64 *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);
        return error;
}

struct ea_find {
        struct gfs2_ea_request *ef_er;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;
        struct gfs2_ea_request *er = ef->ef_er;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == er->er_type) {
                if (ea->ea_name_len == er->er_name_len &&
                    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }

        return 0;
}

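/**
 * gfs2_ea_find - finds an EA matching the type and name in the request
 * @ip: the inode
 * @er: the request describing the EA to look for
 * @el: filled in with the location of the EA if it is found
 *
 * Returns: errno; el->el_ea is left NULL if no matching EA exists
 */
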
int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                 struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.ef_er = er;
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}

/**
 * ea_dealloc_unstuffed -
 * @ip:
 * @bh:
 * @ea:
 * @prev:
 * @private:
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        struct buffer_head *dibh;
        __be64 *dataptrs;
        u64 bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
                                 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        if (prev && !leave) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);
        return error;
}

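/**
 * ea_remove_unstuffed - grabs the locks needed to deallocate an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the EA header whose data blocks are to be freed
 * @prev: the previous EA header in the block (or NULL)
 * @leave: if set, keep the record (marked unused) so it can be reused,
 *         instead of merging it into the previous record
 *
 * Returns: errno
 */
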
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

        gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}

struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

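/**
 * ea_list_i - ea_foreach callback used by gfs2_ea_list
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the current EA header
 * @prev: the previous EA header
 * @private: the struct ea_list being filled in
 *
 * Appends the prefixed, NUL-terminated name of each EA to the request
 * buffer (if one was supplied) and accumulates the total size in ei_size.
 *
 * Returns: errno
 */
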
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size = gfs2_ea_strlen(ea);

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (er->er_data_len) {
                char *prefix = NULL;
                unsigned int l = 0;
                char c = 0;

                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                switch (ea->ea_type) {
                case GFS2_EATYPE_USR:
                        prefix = "user.";
                        l = 5;
                        break;
                case GFS2_EATYPE_SYS:
                        prefix = "system.";
                        l = 7;
                        break;
                case GFS2_EATYPE_SECURITY:
                        prefix = "security.";
                        l = 9;
                        break;
                }

                BUG_ON(l == 0);

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
        }

        ei->ei_size += ea_size;

        return 0;
}

/**
 * gfs2_ea_list -
 * @ip:
 * @er:
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        if (ip->i_eattr) {
                struct ea_list ei = { .ei_er = er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The data to be copied
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                            char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;
}

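/**
 * gfs2_ea_get_copy - copies an EA's value into the supplied buffer
 * @ip: the inode
 * @el: the location of the EA
 * @data: the buffer to copy the value into
 *
 * Returns: errno
 */
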
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                     char *data)
{
        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
                return 0;
        } else
                return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i -
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (er->er_data_len) {
                if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
                        error = -ERANGE;
                else
                        error = gfs2_ea_get_copy(ip, &el, er->er_data);
        }
        if (!error)
                error = GFS2_EA_DATA_LEN(el.el_ea);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_get -
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len ||
            er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea;
        unsigned int n = 1;
        u64 block;
        int error;

        error = gfs2_alloc_block(ip, &block, &n);
        if (error)
                return error;
        gfs2_trans_add_unrevoke(sdp, block, 1);
        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        gfs2_add_inode_blocks(&ip->i_inode, 1);

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * returns : errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;
        ea->__pad = 0;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        u64 block;
                        int mh_size = sizeof(struct gfs2_meta_header);
                        unsigned int n = 1;

                        error = gfs2_alloc_block(ip, &block, &n);
                        if (error)
                                return error;
                        gfs2_trans_add_unrevoke(sdp, block, 1);
                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

                        gfs2_add_inode_blocks(&ip->i_inode, 1);

                        copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
                                                           data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64(bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er, void *private);

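/**
 * ea_alloc_skeleton - wraps an EA write in the required quota, reservation
 *                     and transaction setup
 * @ip: the inode
 * @er: the write request
 * @blks: the number of blocks the request may need
 * @skeleton_call: the function that actually writes the EA
 * @private: opaque data passed through to @skeleton_call
 *
 * Returns: errno
 */
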
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
{
        struct gfs2_alloc *al;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_lock_check(ip);
        if (error)
                goto out;

        al->al_requested = blks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 blks + al->al_rgd->rd_length +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                if (er->er_flags & GFS2_ERF_MODE) {
                        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                             (ip->i_inode.i_mode & S_IFMT) ==
                                             (er->er_mode & S_IFMT));
                        ip->i_inode.i_mode = er->er_mode;
                }
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

out_end_trans:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip:
 * @er:
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
        unsigned int blks = 1;

        if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}

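/**
 * ea_split_ea - splits an EA record in two
 * @ea: the record to split
 *
 * Shrinks @ea to its used size and returns a pointer to a new, unused
 * record occupying the remainder of the original record's space.  The
 * GFS2_EAFLAG_LAST flag is transferred to the new record if it was set.
 */
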
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        u32 ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}

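/*
 * Remove a stuffed EA by marking it unused or, where possible, by merging
 * its record into the previous record in the block.
 */
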
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        u32 len;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        if (er->er_flags & GFS2_ERF_MODE) {
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                        (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
                ip->i_inode.i_mode = er->er_mode;
        }
        ip->i_inode.i_ctime = CURRENT_TIME;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
out:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

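/**
 * ea_set_simple - ea_foreach callback that tries to reuse space in an
 *                 existing EA block for the new EA
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the current EA header
 * @prev: the previous EA header
 * @private: the struct ea_set describing the request
 *
 * Returns: 1 if the EA was written into this block, 0 if the search
 *          should continue, or -errno on error
 */
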
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                         void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        GFS2_SB(&ip->i_inode)->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

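/**
 * ea_set_block - adds a new EA block for the request
 * @ip: the inode
 * @er: the write request
 * @private: if set, the location of a stuffed EA to remove afterwards
 *
 * Allocates a new EA block and writes the request into it, converting the
 * inode to the indirect EA layout first if it isn't using it already.
 *
 * Returns: errno
 */
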
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
        __be64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                __be64 *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
                                       &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (__be64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
        } else {
                u64 blk;
                unsigned int n = 1;
                error = gfs2_alloc_block(ip, &blk, &n);
                if (error)
                        return error;
                gfs2_trans_add_unrevoke(sdp, blk, 1);
                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (__be64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_eattr);
                ip->i_eattr = blk;
                ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
                gfs2_add_inode_blocks(&ip->i_inode, 1);

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((u64)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, private);

out:
        brelse(indbh);
        return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                    struct gfs2_ea_location *el)
{
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

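/**
 * gfs2_ea_set_i - creates, replaces or adds an extended attribute
 * @ip: the inode
 * @er: the request
 *
 * Honours the XATTR_CREATE and XATTR_REPLACE request flags.
 *
 * Returns: errno
 */
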
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr) {
                if (er->er_flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, er);
        }

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(er->er_flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, er, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
        } else {
                error = -ENODATA;
                if (!(er->er_flags & XATTR_REPLACE))
                        error = ea_set_i(ip, er, NULL);
        }

        return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }
        error = ea_check_size(GFS2_SB(&ip->i_inode), er);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (prev) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else
                ea->ea_type = GFS2_EATYPE_UNUSED;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

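/**
 * gfs2_ea_remove_i - removes the EA described by the request
 * @ip: the inode
 * @er: the request
 *
 * Returns: errno
 */
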
int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
                                            0);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */

int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_header *ea, char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;

        error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
        if (error)
                goto out;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto fail;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto fail;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto fail;
                }

                gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

                memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;

fail:
        gfs2_trans_end(sdp);
        kfree(bh);
        return error;
}

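/**
 * gfs2_ea_acl_chmod - updates an ACL EA's data in place after a mode change
 * @ip: the inode
 * @el: the location of the ACL EA
 * @attr: the new attributes to apply to the inode
 * @data: the updated ACL data
 *
 * Returns: errno
 */
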
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                      struct iattr *attr, char *data)
{
        struct buffer_head *dibh;
        int error;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
                if (error)
                        return error;

                gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
                memcpy(GFS2_EA2DATA(el->el_ea), data,
                       GFS2_EA_DATA_LEN(el->el_ea));
        } else
                error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

        if (error)
                return error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

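/**
 * ea_dealloc_indirect - frees the EA blocks referenced by the indirect block
 * @ip: the inode
 *
 * Clears each pointer in the indirect EA block, frees the blocks they
 * referenced and drops the GFS2_DIF_EA_INDIRECT flag.  The indirect block
 * itself is freed later by ea_dealloc_block().
 *
 * Returns: errno
 */
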
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
        __be64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(sdp, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(sdp, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd;
                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
                rg_blocks += rgd->rd_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
                                 RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, indbh, 1);

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *eablk = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
        gfs2_rlist_free(&rlist);
out:
        brelse(indbh);
        return error;
}

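/**
 * ea_dealloc_block - frees the inode's remaining EA block
 * @ip: the inode
 *
 * Returns: errno
 */
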
static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        int error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
                                 RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, ip->i_eattr, 1);

        ip->i_eattr = 0;
        gfs2_add_inode_blocks(&ip->i_inode, -1);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
        return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_rindex;

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_rindex;
        }

        error = ea_dealloc_block(ip);

out_rindex:
        gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}