// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uaccess.h>

/*
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
			unsigned int *size)
{
	unsigned int jbsize = sdp->sd_jbsize;

	/* Stuffed */
	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);

	if (*size <= jbsize)
		return 1;

	/* Unstuffed */
	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
		      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);

	return 0;
}

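/*
 * Example (illustrative, not from the original source): on a 4KiB-block
 * filesystem, sd_jbsize is the block size minus the metadata header, so a
 * little under 4KiB.  A 16-byte name with 200 bytes of data needs
 * ALIGN(sizeof(struct gfs2_ea_header) + 16 + 200, 8) bytes and is stuffed.
 * With 16KiB of data instead, only the header, the name, and roughly five
 * 8-byte block pointers (DIV_ROUND_UP(16384, jbsize)) are counted here;
 * the data itself goes to unstuffed blocks.
 */
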
static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
{
	unsigned int size;

	if (dsize > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, nsize, dsize, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}

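/*
 * gfs2_eatype_valid - check whether @type is valid for this on-disk format.
 * Only the newer format (GFS2_FS_FORMAT_MAX) accepts types beyond
 * GFS2_EATYPE_SECURITY (i.e. trusted xattrs).
 */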
static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
{
	switch (sdp->sd_sb.sb_fs_format) {
	case GFS2_FS_FORMAT_MAX:
		return true;

	case GFS2_FS_FORMAT_MIN:
		return type <= GFS2_EATYPE_SECURITY;

	default:
		return false;
	}
}

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

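/*
 * ea_foreach_i invokes an ea_call_t callback for every extended attribute
 * record in a block: a negative return aborts the walk with that error, a
 * positive return stops it early (gfs2_ea_find() uses this to signal
 * "found"), and zero continues.
 */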
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea)) {
			gfs2_consist_inode(ip);
			return -EIO;
		}
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size)) {
			gfs2_consist_inode(ip);
			return -EIO;
		}
		if (!gfs2_eatype_valid(sdp, ea->ea_type)) {
			gfs2_consist_inode(ip);
			return -EIO;
		}
		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size) {
				gfs2_consist_inode(ip);
				return -EIO;
			}
			break;
		}
	}

	return error;
}

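/*
 * ea_foreach - walk every extended attribute of @ip, handling both the
 * direct case (a single EA block) and the GFS2_DIF_EA_INDIRECT case, where
 * ip->i_eattr points to a block of indirect pointers to EA blocks.
 */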
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}

struct ea_find {
	int type;
	const char *name;
	size_t namel;
	struct gfs2_ea_location *ef_el;
};

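/* ea_call_t callback used by gfs2_ea_find() to locate a named attribute. */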
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == ef->type) {
		if (ea->ea_name_len == ef->namel &&
		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
			struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.type = type;
	ef.name = name;
	ef.namel = strlen(name);
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}

/*
 * ea_dealloc_unstuffed
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, bh);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, rgd, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, rgd, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	inode_set_ctime_current(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);

	return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_quota_unhold(ip);

	return error;
}

struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};

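/*
 * ea_list_i - ea_call_t callback that builds the listxattr output: each
 * attribute is emitted as "<prefix><name>\0", where the prefix is derived
 * from ea_type.  When er_data_len is zero, only the required size is
 * accumulated in ei_size.
 */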
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size;
	char *prefix;
	unsigned int l;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
	       sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
	switch (ea->ea_type) {
	case GFS2_EATYPE_USR:
		prefix = "user.";
		l = 5;
		break;
	case GFS2_EATYPE_SYS:
		prefix = "system.";
		l = 7;
		break;
	case GFS2_EATYPE_SECURITY:
		prefix = "security.";
		l = 9;
		break;
	case GFS2_EATYPE_TRUSTED:
		prefix = "trusted.";
		l = 8;
		break;
	default:
		return 0;
	}

	ea_size = l + ea->ea_name_len + 1;
	if (er->er_data_len) {
		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		er->er_data[ei->ei_size + ea_size - 1] = 0;
	}

	ei->ei_size += ea_size;

	return 0;
}

/**
 * gfs2_listxattr - List gfs2 extended attributes
 * @dentry: The dentry whose inode we are interested in
 * @buffer: The buffer to write the results
 * @size: The size of the buffer
 *
 * Returns: actual size of data on success, -errno on error
 */
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
	struct gfs2_ea_request er;
	struct gfs2_holder i_gh;
	int error;

	memset(&er, 0, sizeof(struct gfs2_ea_request));
	if (size) {
		er.er_data = buffer;
		er.er_data_len = size;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_eattr) {
		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 *                       request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @din: The data to be copied in
 * @dout: The data to be copied out (one of din,dout will be NULL)
 *
 * Returns: errno
 */
static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			       const char *din, char *dout)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;
	unsigned char *pos;
	unsigned cp_size;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

		if (dout) {
			memcpy(dout, pos, cp_size);
			dout += sdp->sd_jbsize;
		}

		if (din) {
			gfs2_trans_add_meta(ip->i_gl, bh[x]);
			memcpy(pos, din, cp_size);
			din += sdp->sd_jbsize;
		}

		amount -= sdp->sd_jbsize;
		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}

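/*
 * gfs2_ea_get_copy - copy an attribute's value into @data, either straight
 * out of the EA block (stuffed) or via gfs2_iter_unstuffed().  Returns the
 * number of bytes copied or -errno.
 */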
static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
			    char *data, size_t size)
{
	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
	int ret;

	if (len > size)
		return -ERANGE;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
		return len;
	}
	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
	if (ret < 0)
		return ret;
	return len;
}

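/*
 * gfs2_xattr_acl_get - read a system xattr (used for ACLs) into a freshly
 * allocated buffer.  On success *ppdata points to the kmalloc'd data and
 * the return value is its length; the caller is responsible for freeing it.
 */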
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
	struct gfs2_ea_location el;
	int error;
	int len;
	char *data;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
	if (error)
		return error;
	if (!el.el_ea)
		goto out;
	if (!GFS2_EA_DATA_LEN(el.el_ea))
		goto out;

	len = GFS2_EA_DATA_LEN(el.el_ea);
	data = kmalloc(len, GFP_NOFS);
	error = -ENOMEM;
	if (data == NULL)
		goto out;

	error = gfs2_ea_get_copy(ip, &el, data, len);
	if (error < 0)
		kfree(data);
	else
		*ppdata = data;
out:
	brelse(el.el_bh);
	return error;
}

/**
 * __gfs2_xattr_get - Get a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @buffer: The buffer to write the result into
 * @size: The size of the buffer
 * @type: The type of extended attribute
 *
 * Returns: actual size of data on success, -errno on error
 */
static int __gfs2_xattr_get(struct inode *inode, const char *name,
			    void *buffer, size_t size, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr)
		return -ENODATA;
	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;
	if (size)
		error = gfs2_ea_get_copy(ip, &el, buffer, size);
	else
		error = GFS2_EA_DATA_LEN(el.el_ea);
	brelse(el.el_bh);

	return error;
}

static int gfs2_xattr_get(const struct xattr_handler *handler,
			  struct dentry *unused, struct inode *inode,
			  const char *name, void *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	/* During lookup, SELinux calls this function with the glock locked. */

	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
		if (ret)
			return ret;
	} else {
		gfs2_holder_mark_uninitialized(&gh);
	}
	ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	return ret;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;
	int error;

	error = gfs2_alloc_blocks(ip, &block, &n, 0);
	if (error)
		return error;
	gfs2_trans_remove_revoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, *bhp);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}

/*
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			error = gfs2_alloc_blocks(ip, &block, &n, 0);
			if (error)
				return error;
			gfs2_trans_remove_revoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_meta(ip->i_gl, bh);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);

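/*
 * ea_alloc_skeleton - common allocation wrapper: update the rindex, lock
 * quotas, reserve @blks blocks, open a transaction, and then let
 * @skeleton_call do the actual xattr work before unwinding in reverse.
 */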
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc_parms ap = { .target = blks };
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_lock_check(ip, &ap);
	if (error)
		return error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + gfs2_rg_blocks(ip, blks) +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	inode_set_ctime_current(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
	return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}

/*
 * ea_init - initializes a new eattr block
 *
 * Returns: errno
 */
static int ea_init(struct gfs2_inode *ip, int type, const char *name,
		   const void *data, size_t size)
{
	struct gfs2_ea_request er;
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	er.er_type = type;
	er.er_name = name;
	er.er_name_len = strlen(name);
	er.er_data = (void *)data;
	er.er_data_len = size;

	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}

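/*
 * ea_split_ea - carve the free tail of the record at @ea off into a new
 * record and return it; GFS2_EAFLAG_LAST moves to the new record if @ea
 * carried it.  The caller then writes the new attribute into the tail.
 */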
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}

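/*
 * ea_set_remove_stuffed - get rid of the old copy of a stuffed attribute
 * once a replacement has been written elsewhere: either mark it unused or
 * merge its record length into the previous record.
 */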
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}

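/*
 * State shared between the ea_set_simple*() steps: the request, the
 * location of any old copy to remove, and the block/record chosen for
 * the new attribute.
 */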
struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, bh);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	inode_set_ctime_current(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_meta(ip->i_gl, es->es_bh);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}

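/*
 * ea_set_simple - ea_call_t callback that tries to place the new attribute
 * in an existing record: an unused record that is big enough, or the free
 * tail of a live record (the split case).  Returns 1 once the attribute
 * has been placed, 0 to keep scanning.
 */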
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}

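/*
 * ea_set_block - no existing block had room, so allocate a fresh EA block,
 * hanging it off an indirect block (created here on first use) once the
 * inode has outgrown a single EA block.
 */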
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_meta(ip->i_gl, indbh);
	} else {
		u64 blk;
		unsigned int n = 1;

		error = gfs2_alloc_blocks(ip, &blk, &n, 0);
		if (error)
			return error;
		gfs2_trans_remove_revoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_meta(ip->i_gl, indbh);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}

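/*
 * ea_set_i - central "set" path: first try to reuse space via
 * ea_set_simple(), and fall back to allocating new blocks through
 * ea_alloc_skeleton()/ea_set_block().
 */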
static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
		    const void *value, size_t size, struct gfs2_ea_location *el)
{
	struct gfs2_ea_request er;
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	er.er_type = type;
	er.er_name = name;
	er.er_data = (void *)value;
	er.er_name_len = strlen(name);
	er.er_data_len = size;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = &er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

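/*
 * ea_remove_stuffed - delete a stuffed attribute in place, merging its
 * record into the previous one when possible or marking it unused.
 */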
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
	}

	inode_set_ctime_current(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

/**
 * gfs2_xattr_remove - Remove a GFS2 extended attribute
 * @ip: The inode
 * @type: The type of the extended attribute
 * @name: The name of the extended attribute
 *
 * This is not called directly by the VFS since we use the (common)
 * scheme of making a "set with NULL data" mean a remove request. Note
 * that this is different from a set with zero length data.
 *
 * Returns: 0, or errno on failure
 */
static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);

	brelse(el.el_bh);

	return error;
}

/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 or errno on failure
 */
int __gfs2_xattr_set(struct inode *inode, const char *name,
		     const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	if (value == NULL) {
		error = gfs2_xattr_remove(ip, type, name);
		if (error == -ENODATA && !(flags & XATTR_REPLACE))
			error = 0;
		return error;
	}

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}

static int gfs2_xattr_set(const struct xattr_handler *handler,
			  struct mnt_idmap *idmap,
			  struct dentry *unused, struct inode *inode,
			  const char *name, const void *value,
			  size_t size, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	ret = gfs2_qa_get(ip);
	if (ret)
		return ret;

	/* May be called from gfs_setattr with the glock locked. */

	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;
	} else {
		if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) {
			ret = -EIO;
			goto out;
		}
		gfs2_holder_mark_uninitialized(&gh);
	}
	ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
out:
	gfs2_qa_put(ip);
	return ret;
}

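/*
 * ea_dealloc_indirect - free every EA block referenced by the indirect
 * block.  The first pass builds a resource-group list so all the needed
 * rgrp glocks can be taken before the second pass actually frees the
 * blocks in contiguous runs.
 */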
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, indbh);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	rgd = NULL;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, rgd, bstart, blen);
			bstart = bn;
			rgd = gfs2_blk2rgrpd(sdp, bstart, true);
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, rgd, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}

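/*
 * ea_dealloc_block - free the inode's one remaining EA (or indirect)
 * block and clear ip->i_eattr.
 */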
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, rgd, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (!error) {
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);
			brelse(dibh);
		}
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) {
		error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
		if (error)
			goto out_quota;

		if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
			error = ea_dealloc_indirect(ip);
			if (error)
				goto out_quota;
		}
	}

	error = ea_dealloc_block(ip);

out_quota:
	gfs2_quota_unhold(ip);
	return error;
}

static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = GFS2_EATYPE_USR,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};

static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags  = GFS2_EATYPE_SECURITY,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};

static bool
gfs2_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

static const struct xattr_handler gfs2_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags  = GFS2_EATYPE_TRUSTED,
	.list   = gfs2_xattr_trusted_list,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};

const struct xattr_handler * const gfs2_xattr_handlers_max[] = {
	/* GFS2_FS_FORMAT_MAX */
	&gfs2_xattr_trusted_handler,

	/* GFS2_FS_FORMAT_MIN */
	&gfs2_xattr_user_handler,
	&gfs2_xattr_security_handler,
	NULL,
};

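/*
 * Filesystems with the older minimum format do not support trusted
 * xattrs, so their handler table simply starts one entry further in,
 * skipping &gfs2_xattr_trusted_handler.
 */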
const struct xattr_handler * const *gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;