1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
7 #include <linux/security.h>
/*
 * Fragment of the xattr iterator state: the superblock used for metadata
 * block I/O (see erofs_get_meta_page(it->sb, ...) below).
 * NOTE(review): the enclosing struct declaration and its remaining fields
 * (page, kaddr, blkaddr, ofs — all referenced later) are missing from this
 * extraction; confirm against the full file.
 */
11 struct super_block
*sb
;
/*
 * Tear down an xattr iterator: drop the kernel mapping of the current
 * metadata page and release the page lock.
 * @it:     iterator whose page/kaddr are being released
 * @atomic: selects which unmap primitive matches the earlier map call;
 *          NOTE(review): the branch that consumes @atomic (presumably a
 *          non-atomic kunmap() path, per the comment below) is missing
 *          from this extraction — confirm against the full file.
 */
19 static inline void xattr_iter_end(struct xattr_iter
*it
, bool atomic
)
21 /* the only user of kunmap() is 'init_inode_xattrs' */
/* atomic mapping case: unmap by kernel virtual address */
25 kunmap_atomic(it
->kaddr
);
27 unlock_page(it
->page
);
/*
 * Final teardown after iteration completes: ends the iterator assuming an
 * atomic (kmap_atomic) mapping is active.
 * NOTE(review): the extraction skips original lines 32-35; a guard (e.g.
 * an early return when no page is held) may exist there — confirm.
 */
31 static inline void xattr_iter_end_final(struct xattr_iter
*it
)
36 xattr_iter_end(it
, true);
/*
 * Lazily parse the on-disk xattr ibody header of @inode and cache the
 * shared-xattr index array in the in-memory erofs_inode.  Serialized per
 * inode via the EROFS_I_BL_XATTR_BIT bit-lock; completion is advertised
 * through EROFS_I_EA_INITED_BIT so later callers return fast.
 * Returns 0 on success or a negative errno (per the PTR_ERR paths below).
 * NOTE(review): many original lines are missing from this extraction
 * (declarations of 'it', 'ret', 'i', 'atomic_map'; several closing braces,
 * early returns and the out_unlock label) — verify against the full file.
 */
39 static int init_inode_xattrs(struct inode
*inode
)
41 struct erofs_inode
*const vi
= EROFS_I(inode
);
44 struct erofs_xattr_ibody_header
*ih
;
45 struct super_block
*sb
;
46 struct erofs_sb_info
*sbi
;
50 /* the most case is that xattrs of this inode are initialized. */
51 if (test_bit(EROFS_I_EA_INITED_BIT
, &vi
->flags
))
/* take the per-inode bit-lock; killable wait so a fatal signal aborts */
54 if (wait_on_bit_lock(&vi
->flags
, EROFS_I_BL_XATTR_BIT
, TASK_KILLABLE
))
57 /* someone has initialized xattrs for us? */
58 if (test_bit(EROFS_I_EA_INITED_BIT
, &vi
->flags
))
62 * bypass all xattr operations if ->xattr_isize is not greater than
63 * sizeof(struct erofs_xattr_ibody_header), in detail:
64 * 1) it is not enough to contain erofs_xattr_ibody_header then
65 * ->xattr_isize should be 0 (it means no xattr);
66 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
67 * undefined right now (maybe use later with some new sb feature).
69 if (vi
->xattr_isize
== sizeof(struct erofs_xattr_ibody_header
)) {
70 erofs_err(inode
->i_sb
,
71 "xattr_isize %d of nid %llu is not supported yet",
72 vi
->xattr_isize
, vi
->nid
);
75 } else if (vi
->xattr_isize
< sizeof(struct erofs_xattr_ibody_header
)) {
76 if (vi
->xattr_isize
) {
/* nonzero but too small to hold the header: corrupt image */
77 erofs_err(inode
->i_sb
,
78 "bogus xattr ibody @ nid %llu", vi
->nid
);
81 goto out_unlock
; /* xattr ondisk layout error */
/* locate the ibody: it starts right after the on-disk inode */
89 it
.blkaddr
= erofs_blknr(iloc(sbi
, vi
->nid
) + vi
->inode_isize
);
90 it
.ofs
= erofs_blkoff(iloc(sbi
, vi
->nid
) + vi
->inode_isize
);
92 it
.page
= erofs_get_meta_page(sb
, it
.blkaddr
);
93 if (IS_ERR(it
.page
)) {
94 ret
= PTR_ERR(it
.page
);
98 /* read in shared xattr array (non-atomic, see kmalloc below) */
99 it
.kaddr
= kmap(it
.page
);
102 ih
= (struct erofs_xattr_ibody_header
*)(it
.kaddr
+ it
.ofs
);
104 vi
->xattr_shared_count
= ih
->h_shared_count
;
105 vi
->xattr_shared_xattrs
= kmalloc_array(vi
->xattr_shared_count
,
106 sizeof(uint
), GFP_KERNEL
);
107 if (!vi
->xattr_shared_xattrs
) {
108 xattr_iter_end(&it
, atomic_map
);
113 /* let's skip ibody header */
114 it
.ofs
+= sizeof(struct erofs_xattr_ibody_header
);
/* copy each little-endian shared-xattr id, crossing blocks as needed */
116 for (i
= 0; i
< vi
->xattr_shared_count
; ++i
) {
117 if (it
.ofs
>= EROFS_BLKSIZ
) {
118 /* cannot be unaligned */
119 DBG_BUGON(it
.ofs
!= EROFS_BLKSIZ
);
120 xattr_iter_end(&it
, atomic_map
);
122 it
.page
= erofs_get_meta_page(sb
, ++it
.blkaddr
);
123 if (IS_ERR(it
.page
)) {
/* free the partial array so a retry starts clean */
124 kfree(vi
->xattr_shared_xattrs
);
125 vi
->xattr_shared_xattrs
= NULL
;
126 ret
= PTR_ERR(it
.page
);
130 it
.kaddr
= kmap_atomic(it
.page
);
134 vi
->xattr_shared_xattrs
[i
] =
135 le32_to_cpu(*(__le32
*)(it
.kaddr
+ it
.ofs
));
136 it
.ofs
+= sizeof(__le32
);
138 xattr_iter_end(&it
, atomic_map
);
/* publish completion, then drop the bit-lock and wake waiters */
140 set_bit(EROFS_I_EA_INITED_BIT
, &vi
->flags
);
143 clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT
, &vi
->flags
);
/*
 * Callback table that parameterizes xattr_foreach(): getxattr and
 * listxattr each supply their own entry/name/alloc_buffer/value hooks.
 * NOTE(review): the trailing parameter lists of ->name and ->value
 * (original lines 157 and 160, presumably the length argument) are
 * missing from this extraction.
 */
148 * the general idea for these return values is
149 * if 0 is returned, go on processing the current xattr;
150 * 1 (> 0) is returned, skip this round to process the next xattr;
151 * -err (< 0) is returned, an error (maybe ENOXATTR) occurred
152 * and need to be handled
154 struct xattr_iter_handlers
{
155 int (*entry
)(struct xattr_iter
*_it
, struct erofs_xattr_entry
*entry
);
156 int (*name
)(struct xattr_iter
*_it
, unsigned int processed
, char *buf
,
158 int (*alloc_buffer
)(struct xattr_iter
*_it
, unsigned int value_sz
);
159 void (*value
)(struct xattr_iter
*_it
, unsigned int processed
, char *buf
,
/*
 * Advance the iterator to the next metadata block once ->ofs has run past
 * the current block: release the old page, remap the page at the new
 * blkaddr, and reduce ->ofs to an in-block offset.  No-op while ->ofs is
 * still inside the current block.
 * Returns 0 on success or the PTR_ERR of the failed meta-page read
 * (NOTE(review): the returns and error-path lines between the fragments
 * are missing from this extraction).
 */
163 static inline int xattr_iter_fixup(struct xattr_iter
*it
)
165 if (it
->ofs
< EROFS_BLKSIZ
)
/* drop the current (atomic) mapping before switching blocks */
168 xattr_iter_end(it
, true);
170 it
->blkaddr
+= erofs_blknr(it
->ofs
);
172 it
->page
= erofs_get_meta_page(it
->sb
, it
->blkaddr
);
173 if (IS_ERR(it
->page
)) {
174 int err
= PTR_ERR(it
->page
);
180 it
->kaddr
= kmap_atomic(it
->page
);
181 it
->ofs
= erofs_blkoff(it
->ofs
);
/*
 * Position @it at the start of the inode's inline xattr area (the region
 * after the on-disk inode plus the ibody header) and map its first block.
 * Returns the number of inline xattr bytes remaining on success, or a
 * negative errno from the meta-page read.
 * NOTE(review): the second parameter line (presumably 'struct inode
 * *inode', original lines 186-187) is missing from this extraction.
 */
185 static int inline_xattr_iter_begin(struct xattr_iter
*it
,
188 struct erofs_inode
*const vi
= EROFS_I(inode
);
189 struct erofs_sb_info
*const sbi
= EROFS_SB(inode
->i_sb
);
190 unsigned int xattr_header_sz
, inline_xattr_ofs
;
192 xattr_header_sz
= inlinexattr_header_size(inode
);
193 if (xattr_header_sz
>= vi
->xattr_isize
) {
/* header larger than the whole area would be on-disk corruption */
194 DBG_BUGON(xattr_header_sz
> vi
->xattr_isize
);
198 inline_xattr_ofs
= vi
->inode_isize
+ xattr_header_sz
;
200 it
->blkaddr
= erofs_blknr(iloc(sbi
, vi
->nid
) + inline_xattr_ofs
);
201 it
->ofs
= erofs_blkoff(iloc(sbi
, vi
->nid
) + inline_xattr_ofs
);
203 it
->page
= erofs_get_meta_page(inode
->i_sb
, it
->blkaddr
);
204 if (IS_ERR(it
->page
))
205 return PTR_ERR(it
->page
);
207 it
->kaddr
= kmap_atomic(it
->page
);
208 return vi
->xattr_isize
- xattr_header_sz
;
/*
 * Walk one on-disk xattr entry through the supplied handler ops:
 *   0. refresh the block mapping, 1. read the fixed entry struct,
 *   2. stream the name through op->name, 3. stream the value through
 *   op->value (after op->alloc_buffer sizes/validates the destination).
 * @tlimit, when non-NULL, bounds the remaining inline-area bytes and is
 * how on-disk corruption past xattr_isize is detected (-EFSCORRUPTED).
 * A positive handler return means "skip this entry": ->ofs is advanced
 * past the rest of the entry so iteration can continue.
 * NOTE(review): 'slice' is computed from PAGE_SIZE while block bounds use
 * EROFS_BLKSIZ elsewhere — presumably they are equal here; confirm.
 * NOTE(review): several loop increments, brace and return lines are
 * missing from this extraction.
 */
212 * Regardless of success or failure, `xattr_foreach' will end up with
213 * `ofs' pointing to the next xattr item rather than an arbitrary position.
215 static int xattr_foreach(struct xattr_iter
*it
,
216 const struct xattr_iter_handlers
*op
,
217 unsigned int *tlimit
)
219 struct erofs_xattr_entry entry
;
220 unsigned int value_sz
, processed
, slice
;
223 /* 0. fixup blkaddr, ofs, ipage */
224 err
= xattr_iter_fixup(it
);
229 * 1. read xattr entry to the memory,
230 * since we do EROFS_XATTR_ALIGN
231 * therefore entry should be in the page
233 entry
= *(struct erofs_xattr_entry
*)(it
->kaddr
+ it
->ofs
);
235 unsigned int entry_sz
= erofs_xattr_entry_size(&entry
);
237 /* xattr on-disk corruption: xattr entry beyond xattr_isize */
238 if (*tlimit
< entry_sz
) {
240 return -EFSCORRUPTED
;
245 it
->ofs
+= sizeof(struct erofs_xattr_entry
);
246 value_sz
= le16_to_cpu(entry
.e_value_size
);
/* let the caller veto this entry early (e.g. index/name-len mismatch) */
249 err
= op
->entry(it
, &entry
);
/* skip path: jump ofs over the remaining name + value bytes */
251 it
->ofs
+= entry
.e_name_len
+ value_sz
;
255 /* 2. handle xattr name (ofs will finally be at the end of name) */
258 while (processed
< entry
.e_name_len
) {
259 if (it
->ofs
>= EROFS_BLKSIZ
) {
260 DBG_BUGON(it
->ofs
> EROFS_BLKSIZ
);
262 err
= xattr_iter_fixup(it
);
268 slice
= min_t(unsigned int, PAGE_SIZE
- it
->ofs
,
269 entry
.e_name_len
- processed
);
272 err
= op
->name(it
, processed
, it
->kaddr
+ it
->ofs
, slice
);
/* name rejected: skip the rest of the name plus the whole value */
274 it
->ofs
+= entry
.e_name_len
- processed
+ value_sz
;
282 /* 3. handle xattr value */
285 if (op
->alloc_buffer
) {
286 err
= op
->alloc_buffer(it
, value_sz
);
293 while (processed
< value_sz
) {
294 if (it
->ofs
>= EROFS_BLKSIZ
) {
295 DBG_BUGON(it
->ofs
> EROFS_BLKSIZ
);
297 err
= xattr_iter_fixup(it
);
303 slice
= min_t(unsigned int, PAGE_SIZE
- it
->ofs
,
304 value_sz
- processed
);
305 op
->value(it
, processed
, it
->kaddr
+ it
->ofs
, slice
);
311 /* xattrs should be 4-byte aligned (on-disk constraint) */
312 it
->ofs
= EROFS_XATTR_ALIGN(it
->ofs
);
313 return err
< 0 ? err
: 0;
/*
 * Per-lookup state for erofs_getxattr(): the base iterator plus the
 * caller's buffer capacity and the xattr name-index being matched.
 * NOTE(review): the 'buffer' and 'name' members referenced by the
 * handlers below are missing from this extraction (original lines 318-319).
 */
316 struct getxattr_iter
{
317 struct xattr_iter it
;
320 int buffer_size
, index
;
/*
 * ->entry hook for getxattr: cheap pre-filter on the fixed entry header.
 * Rejects with -ENOATTR when either the name index or the name length
 * differs from the target, so xattr_foreach() skips the entry without
 * reading its name bytes.
 */
324 static int xattr_entrymatch(struct xattr_iter
*_it
,
325 struct erofs_xattr_entry
*entry
)
327 struct getxattr_iter
*it
= container_of(_it
, struct getxattr_iter
, it
);
329 return (it
->index
!= entry
->e_name_index
||
330 it
->name
.len
!= entry
->e_name_len
) ? -ENOATTR
: 0;
/*
 * ->name hook for getxattr: compare one block-bounded slice of the
 * on-disk name against the corresponding slice of the requested name;
 * -ENOATTR aborts this entry on the first mismatching slice.
 */
333 static int xattr_namematch(struct xattr_iter
*_it
,
334 unsigned int processed
, char *buf
, unsigned int len
)
336 struct getxattr_iter
*it
= container_of(_it
, struct getxattr_iter
, it
);
338 return memcmp(buf
, it
->name
.name
+ processed
, len
) ? -ENOATTR
: 0;
/*
 * ->alloc_buffer hook for getxattr: validate the caller's buffer against
 * the value size.  Records the real value size in ->buffer_size (this is
 * what erofs_getxattr ultimately returns), then:
 *   - NULL buffer: return 1 ("skip value copy", size-query semantics);
 *   - too-small buffer: -ERANGE; otherwise 0 to proceed with the copy.
 */
341 static int xattr_checkbuffer(struct xattr_iter
*_it
,
342 unsigned int value_sz
)
344 struct getxattr_iter
*it
= container_of(_it
, struct getxattr_iter
, it
);
345 int err
= it
->buffer_size
< value_sz
? -ERANGE
: 0;
347 it
->buffer_size
= value_sz
;
348 return !it
->buffer
? 1 : err
;
/*
 * ->value hook for getxattr: copy one slice of the on-disk value into the
 * caller's buffer at the running offset @processed.
 */
351 static void xattr_copyvalue(struct xattr_iter
*_it
,
352 unsigned int processed
,
353 char *buf
, unsigned int len
)
355 struct getxattr_iter
*it
= container_of(_it
, struct getxattr_iter
, it
);
357 memcpy(it
->buffer
+ processed
, buf
, len
);
/* Handler set wiring xattr_foreach() to the getxattr lookup callbacks. */
360 static const struct xattr_iter_handlers find_xattr_handlers
= {
361 .entry
= xattr_entrymatch
,
362 .name
= xattr_namematch
,
363 .alloc_buffer
= xattr_checkbuffer
,
364 .value
= xattr_copyvalue
/*
 * Search the inode's inline xattr area for the target attribute.
 * Returns the value size (via ->buffer_size) on a hit, or a negative
 * errno (-ENOATTR when the attribute is not inline).
 * NOTE(review): the loop driving xattr_foreach over 'remaining' is
 * partially missing from this extraction.
 */
367 static int inline_getxattr(struct inode
*inode
, struct getxattr_iter
*it
)
370 unsigned int remaining
;
372 ret
= inline_xattr_iter_begin(&it
->it
, inode
);
378 ret
= xattr_foreach(&it
->it
, &find_xattr_handlers
, &remaining
);
382 xattr_iter_end_final(&it
->it
);
384 return ret
? ret
: it
->buffer_size
;
/*
 * Search the inode's shared xattrs (ids cached by init_inode_xattrs) for
 * the target attribute.  Consecutive shared xattrs in the same metadata
 * block reuse the current mapping; a new block ends the old iterator and
 * maps the new page.  Returns the value size on a hit or a negative errno.
 * NOTE(review): the unmap-before-remap call at original line 403 appears
 * guarded by a condition missing from this extraction (likely "if (i)",
 * since no page is held on the first iteration) — confirm.
 */
387 static int shared_getxattr(struct inode
*inode
, struct getxattr_iter
*it
)
389 struct erofs_inode
*const vi
= EROFS_I(inode
);
390 struct super_block
*const sb
= inode
->i_sb
;
391 struct erofs_sb_info
*const sbi
= EROFS_SB(sb
);
395 for (i
= 0; i
< vi
->xattr_shared_count
; ++i
) {
396 erofs_blk_t blkaddr
=
397 xattrblock_addr(sbi
, vi
->xattr_shared_xattrs
[i
]);
399 it
->it
.ofs
= xattrblock_offset(sbi
, vi
->xattr_shared_xattrs
[i
]);
401 if (!i
|| blkaddr
!= it
->it
.blkaddr
) {
403 xattr_iter_end(&it
->it
, true);
405 it
->it
.page
= erofs_get_meta_page(sb
, blkaddr
);
406 if (IS_ERR(it
->it
.page
))
407 return PTR_ERR(it
->it
.page
);
409 it
->it
.kaddr
= kmap_atomic(it
->it
.page
);
410 it
->it
.blkaddr
= blkaddr
;
/* tlimit is NULL: shared entries are not bounded by xattr_isize */
413 ret
= xattr_foreach(&it
->it
, &find_xattr_handlers
, NULL
);
417 if (vi
->xattr_shared_count
)
418 xattr_iter_end_final(&it
->it
);
420 return ret
? ret
: it
->buffer_size
;
/*
 * ->list callback: user.* xattrs are visible only when the filesystem
 * was mounted with the XATTR_USER option.
 */
423 static bool erofs_xattr_user_list(struct dentry
*dentry
)
425 return test_opt(EROFS_SB(dentry
->d_sb
), XATTR_USER
);
/*
 * ->list callback: trusted.* xattrs are visible only to processes with
 * CAP_SYS_ADMIN.
 */
428 static bool erofs_xattr_trusted_list(struct dentry
*dentry
)
430 return capable(CAP_SYS_ADMIN
);
/*
 * Core getxattr entry point: ensure the inode's xattr metadata is
 * initialized, then look the name up first inline, then in the shared
 * xattr area.  Returns the value size or a negative errno.
 * NOTE(review): the 'name' parameter line, the 'ret' declaration, the
 * NULL-name/ENODATA guards and the fallthrough condition between the
 * inline and shared lookups are missing from this extraction.
 */
433 int erofs_getxattr(struct inode
*inode
, int index
,
435 void *buffer
, size_t buffer_size
)
438 struct getxattr_iter it
;
443 ret
= init_inode_xattrs(inode
);
449 it
.name
.len
= strlen(name
);
/* on-disk name length is fixed-width; overlong names can never match */
450 if (it
.name
.len
> EROFS_NAME_LEN
)
455 it
.buffer_size
= buffer_size
;
457 it
.it
.sb
= inode
->i_sb
;
458 ret
= inline_getxattr(inode
, &it
);
460 ret
= shared_getxattr(inode
, &it
);
/*
 * Shared ->get implementation for the user/trusted/security handlers:
 * enforce the per-namespace access policy selected by handler->flags,
 * then delegate to erofs_getxattr() with that flag as the name index.
 * NOTE(review): the error returns inside the switch arms (e.g. -EOPNOTSUPP
 * and -EPERM) are missing from this extraction.
 */
464 static int erofs_xattr_generic_get(const struct xattr_handler
*handler
,
465 struct dentry
*unused
, struct inode
*inode
,
466 const char *name
, void *buffer
, size_t size
)
468 struct erofs_sb_info
*const sbi
= EROFS_I_SB(inode
);
470 switch (handler
->flags
) {
471 case EROFS_XATTR_INDEX_USER
:
472 if (!test_opt(sbi
, XATTR_USER
))
475 case EROFS_XATTR_INDEX_TRUSTED
:
476 if (!capable(CAP_SYS_ADMIN
))
479 case EROFS_XATTR_INDEX_SECURITY
:
485 return erofs_getxattr(inode
, handler
->flags
, name
, buffer
, size
);
/* VFS handler for the user.* xattr namespace (mount-option gated). */
488 const struct xattr_handler erofs_xattr_user_handler
= {
489 .prefix
= XATTR_USER_PREFIX
,
490 .flags
= EROFS_XATTR_INDEX_USER
,
491 .list
= erofs_xattr_user_list
,
492 .get
= erofs_xattr_generic_get
,
/* VFS handler for the trusted.* xattr namespace (CAP_SYS_ADMIN gated). */
495 const struct xattr_handler erofs_xattr_trusted_handler
= {
496 .prefix
= XATTR_TRUSTED_PREFIX
,
497 .flags
= EROFS_XATTR_INDEX_TRUSTED
,
498 .list
= erofs_xattr_trusted_list
,
499 .get
= erofs_xattr_generic_get
,
/* VFS handler for the security.* namespace; no .list, always listed. */
502 #ifdef CONFIG_EROFS_FS_SECURITY
503 const struct xattr_handler __maybe_unused erofs_xattr_security_handler
= {
504 .prefix
= XATTR_SECURITY_PREFIX
,
505 .flags
= EROFS_XATTR_INDEX_SECURITY
,
506 .get
= erofs_xattr_generic_get
,
/*
 * NULL-terminated handler table registered with the VFS (sb->s_xattr);
 * POSIX ACL and security handlers are compiled in conditionally.
 */
510 const struct xattr_handler
*erofs_xattr_handlers
[] = {
511 &erofs_xattr_user_handler
,
512 #ifdef CONFIG_EROFS_FS_POSIX_ACL
513 &posix_acl_access_xattr_handler
,
514 &posix_acl_default_xattr_handler
,
516 &erofs_xattr_trusted_handler
,
517 #ifdef CONFIG_EROFS_FS_SECURITY
518 &erofs_xattr_security_handler
,
/*
 * Per-call state for erofs_listxattr(): the base iterator, the dentry
 * (needed by per-handler ->list visibility checks), and the output
 * buffer capacity plus the running write offset.
 * NOTE(review): the 'buffer' member used by the handlers below is
 * missing from this extraction (original line ~527).
 */
523 struct listxattr_iter
{
524 struct xattr_iter it
;
526 struct dentry
*dentry
;
528 int buffer_size
, buffer_ofs
;
/*
 * ->entry hook for listxattr: resolve the entry's namespace handler,
 * apply its ->list visibility check, and either (size-query mode) just
 * accumulate the needed length, or copy the "prefix" part of the name
 * and advance buffer_ofs; the name body follows via xattr_namelist.
 * Returns 1 to skip hidden entries; -ERANGE-style overflow handling is
 * presumably on the missing lines after the bounds check — confirm.
 * NOTE(review): declarations of 'prefix' and the buffer==NULL branch
 * structure are partially missing from this extraction.
 */
531 static int xattr_entrylist(struct xattr_iter
*_it
,
532 struct erofs_xattr_entry
*entry
)
534 struct listxattr_iter
*it
=
535 container_of(_it
, struct listxattr_iter
, it
);
536 unsigned int prefix_len
;
539 const struct xattr_handler
*h
=
540 erofs_xattr_handler(entry
->e_name_index
);
/* unknown namespace, or handler's ->list says: hide from this caller */
542 if (!h
|| (h
->list
&& !h
->list(it
->dentry
)))
545 prefix
= xattr_prefix(h
);
546 prefix_len
= strlen(prefix
);
/* size-query mode: count "prefix" + name + trailing NUL */
549 it
->buffer_ofs
+= prefix_len
+ entry
->e_name_len
+ 1;
553 if (it
->buffer_ofs
+ prefix_len
554 + entry
->e_name_len
+ 1 > it
->buffer_size
)
557 memcpy(it
->buffer
+ it
->buffer_ofs
, prefix
, prefix_len
);
558 it
->buffer_ofs
+= prefix_len
;
/*
 * ->name hook for listxattr: append one slice of the attribute name to
 * the output buffer (prefix was already written by xattr_entrylist).
 */
562 static int xattr_namelist(struct xattr_iter
*_it
,
563 unsigned int processed
, char *buf
, unsigned int len
)
565 struct listxattr_iter
*it
=
566 container_of(_it
, struct listxattr_iter
, it
);
568 memcpy(it
->buffer
+ it
->buffer_ofs
, buf
, len
);
569 it
->buffer_ofs
+= len
;
/*
 * ->alloc_buffer hook for listxattr: listing never copies values, so
 * just terminate the emitted name with NUL.  (Presumably returns 1 so
 * xattr_foreach skips the value copy — the return line is missing from
 * this extraction.)
 */
573 static int xattr_skipvalue(struct xattr_iter
*_it
,
574 unsigned int value_sz
)
576 struct listxattr_iter
*it
=
577 container_of(_it
, struct listxattr_iter
, it
);
579 it
->buffer
[it
->buffer_ofs
++] = '\0';
/* Handler set wiring xattr_foreach() to the listxattr callbacks
 * (no .value: values are never emitted when listing names). */
583 static const struct xattr_iter_handlers list_xattr_handlers
= {
584 .entry
= xattr_entrylist
,
585 .name
= xattr_namelist
,
586 .alloc_buffer
= xattr_skipvalue
,
/*
 * Emit all inline xattr names of the dentry's inode into the list buffer.
 * Returns the accumulated buffer offset on success or a negative errno.
 * NOTE(review): the loop driving xattr_foreach over 'remaining' is
 * partially missing from this extraction.
 */
590 static int inline_listxattr(struct listxattr_iter
*it
)
593 unsigned int remaining
;
595 ret
= inline_xattr_iter_begin(&it
->it
, d_inode(it
->dentry
));
601 ret
= xattr_foreach(&it
->it
, &list_xattr_handlers
, &remaining
);
605 xattr_iter_end_final(&it
->it
);
606 return ret
? ret
: it
->buffer_ofs
;
/*
 * Emit the names of all shared xattrs of the inode, mirroring
 * shared_getxattr()'s block-mapping strategy: reuse the current metadata
 * page while consecutive shared ids land in the same block, otherwise
 * end the iterator and map the new block.  Returns the final buffer
 * offset or a negative errno.
 * NOTE(review): as in shared_getxattr, the guard around the unmap at
 * original line 625 is missing from this extraction — confirm.
 */
609 static int shared_listxattr(struct listxattr_iter
*it
)
611 struct inode
*const inode
= d_inode(it
->dentry
);
612 struct erofs_inode
*const vi
= EROFS_I(inode
);
613 struct super_block
*const sb
= inode
->i_sb
;
614 struct erofs_sb_info
*const sbi
= EROFS_SB(sb
);
618 for (i
= 0; i
< vi
->xattr_shared_count
; ++i
) {
619 erofs_blk_t blkaddr
=
620 xattrblock_addr(sbi
, vi
->xattr_shared_xattrs
[i
]);
622 it
->it
.ofs
= xattrblock_offset(sbi
, vi
->xattr_shared_xattrs
[i
]);
623 if (!i
|| blkaddr
!= it
->it
.blkaddr
) {
625 xattr_iter_end(&it
->it
, true);
627 it
->it
.page
= erofs_get_meta_page(sb
, blkaddr
);
628 if (IS_ERR(it
->it
.page
))
629 return PTR_ERR(it
->it
.page
);
631 it
->it
.kaddr
= kmap_atomic(it
->it
.page
);
632 it
->it
.blkaddr
= blkaddr
;
/* tlimit is NULL: shared entries are not bounded by xattr_isize */
635 ret
= xattr_foreach(&it
->it
, &list_xattr_handlers
, NULL
);
639 if (vi
->xattr_shared_count
)
640 xattr_iter_end_final(&it
->it
);
642 return ret
? ret
: it
->buffer_ofs
;
/*
 * VFS listxattr entry point: initialize the inode's xattr metadata, list
 * inline names first, then append shared names.  -ENOATTR from the
 * inline pass is not fatal — the shared pass still runs.  Returns total
 * bytes written (or needed, when @buffer is NULL) or a negative errno.
 * NOTE(review): the 'ret' declaration, the early-return handling after
 * init_inode_xattrs, and the buffer/buffer_ofs setup lines are missing
 * from this extraction.
 */
645 ssize_t
erofs_listxattr(struct dentry
*dentry
,
646 char *buffer
, size_t buffer_size
)
649 struct listxattr_iter it
;
651 ret
= init_inode_xattrs(d_inode(dentry
));
659 it
.buffer_size
= buffer_size
;
662 it
.it
.sb
= dentry
->d_sb
;
664 ret
= inline_listxattr(&it
);
665 if (ret
< 0 && ret
!= -ENOATTR
)
667 return shared_listxattr(&it
);
670 #ifdef CONFIG_EROFS_FS_POSIX_ACL
671 struct posix_acl
*erofs_get_acl(struct inode
*inode
, int type
)
673 struct posix_acl
*acl
;
678 case ACL_TYPE_ACCESS
:
679 prefix
= EROFS_XATTR_INDEX_POSIX_ACL_ACCESS
;
681 case ACL_TYPE_DEFAULT
:
682 prefix
= EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT
;
685 return ERR_PTR(-EINVAL
);
688 rc
= erofs_getxattr(inode
, prefix
, "", NULL
, 0);
690 value
= kmalloc(rc
, GFP_KERNEL
);
692 return ERR_PTR(-ENOMEM
);
693 rc
= erofs_getxattr(inode
, prefix
, "", value
, rc
);
701 acl
= posix_acl_from_xattr(&init_user_ns
, value
, rc
);