// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/security.h>
#include "xattr.h"
struct xattr_iter {
        struct super_block *sb;
        struct page *page;
        void *kaddr;

        erofs_blk_t blkaddr;
        unsigned int ofs;
};
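/*
 * A xattr_iter (above) is a cursor over the on-disk xattr area: page/kaddr
 * track the currently mapped meta page, while blkaddr/ofs address the next
 * byte to be decoded.
 */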
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
        /* the only user of kunmap() is 'init_inode_xattrs' */
        if (!atomic)
                kunmap(it->page);
        else
                kunmap_atomic(it->kaddr);

        unlock_page(it->page);
        put_page(it->page);
}
static inline void xattr_iter_end_final(struct xattr_iter *it)
{
        if (!it->page)
                return;

        xattr_iter_end(it, true);
}
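/*
 * Mapping discipline: init_inode_xattrs() maps its first meta page with
 * kmap() because it may sleep in kmalloc_array() while the page is mapped;
 * every other user maps with kmap_atomic(), so the final teardown above
 * can always pass atomic == true.
 */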
static int init_inode_xattrs(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct xattr_iter it;
        unsigned int i;
        struct erofs_xattr_ibody_header *ih;
        struct super_block *sb;
        struct erofs_sb_info *sbi;
        bool atomic_map;
        int ret = 0;

        /* in most cases, the xattrs of this inode are already initialized */
        if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
                return 0;

        if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
                return -ERESTARTSYS;

        /* someone has initialized xattrs for us? */
        if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
                goto out_unlock;
        /*
         * bypass all xattr operations if ->xattr_isize is not greater than
         * sizeof(struct erofs_xattr_ibody_header), in detail:
         * 1) it is not big enough to contain erofs_xattr_ibody_header, so
         *    ->xattr_isize should be 0 (it means no xattr);
         * 2) it just contains erofs_xattr_ibody_header, whose on-disk
         *    meaning is undefined right now (may be used later with some
         *    new sb feature).
         */
        if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
                erofs_err(inode->i_sb,
                          "xattr_isize %d of nid %llu is not supported yet",
                          vi->xattr_isize, vi->nid);
                ret = -EOPNOTSUPP;
                goto out_unlock;
        } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
                if (vi->xattr_isize) {
                        erofs_err(inode->i_sb,
                                  "bogus xattr ibody @ nid %llu", vi->nid);
                        DBG_BUGON(1);
                        ret = -EFSCORRUPTED;
                        goto out_unlock;        /* xattr ondisk layout error */
                }
                ret = -ENOATTR;
                goto out_unlock;
        }
        sb = inode->i_sb;
        sbi = EROFS_SB(sb);
        it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
        it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

        it.page = erofs_get_meta_page(sb, it.blkaddr);
        if (IS_ERR(it.page)) {
                ret = PTR_ERR(it.page);
                goto out_unlock;
        }
        /* read in shared xattr array (non-atomic, see kmalloc below) */
        it.kaddr = kmap(it.page);
        atomic_map = false;

        ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

        vi->xattr_shared_count = ih->h_shared_count;
        vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
                                                sizeof(uint), GFP_KERNEL);
        if (!vi->xattr_shared_xattrs) {
                xattr_iter_end(&it, atomic_map);
                ret = -ENOMEM;
                goto out_unlock;
        }
        /* let's skip ibody header */
        it.ofs += sizeof(struct erofs_xattr_ibody_header);

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                if (it.ofs >= EROFS_BLKSIZ) {
                        /* cannot be unaligned */
                        DBG_BUGON(it.ofs != EROFS_BLKSIZ);
                        xattr_iter_end(&it, atomic_map);

                        it.page = erofs_get_meta_page(sb, ++it.blkaddr);
                        if (IS_ERR(it.page)) {
                                kfree(vi->xattr_shared_xattrs);
                                vi->xattr_shared_xattrs = NULL;
                                ret = PTR_ERR(it.page);
                                goto out_unlock;
                        }

                        it.kaddr = kmap_atomic(it.page);
                        atomic_map = true;
                        it.ofs = 0;
                }
                vi->xattr_shared_xattrs[i] =
                        le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
                it.ofs += sizeof(__le32);
        }
        xattr_iter_end(&it, atomic_map);
        set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
        return ret;
}
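/*
 * On-disk layout parsed above: the inode's xattr area begins with an
 * erofs_xattr_ibody_header, followed by h_shared_count 32-bit shared
 * xattr indexes, and only then the inline xattr entries themselves.
 */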
/*
 * the general idea for these return values is
 * if    0 is returned, go on processing the current xattr;
 *       1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe ENOATTR) occurred
 *               and needs to be handled
 */
struct xattr_iter_handlers {
        int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
        int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
                    unsigned int len);
        int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
        void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
                      unsigned int len);
};
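/*
 * Flow sketch (for illustration): for each on-disk entry, xattr_foreach()
 * fires these callbacks in order -- ->entry() to match the entry header,
 * ->name() once per in-page slice of the name, ->alloc_buffer() to size
 * or skip the value, and ->value() once per slice of the value. Any
 * nonzero return short-circuits the remaining steps for that xattr.
 */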
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
        if (it->ofs < EROFS_BLKSIZ)
                return 0;

        xattr_iter_end(it, true);

        it->blkaddr += erofs_blknr(it->ofs);

        it->page = erofs_get_meta_page(it->sb, it->blkaddr);
        if (IS_ERR(it->page)) {
                int err = PTR_ERR(it->page);

                it->page = NULL;
                return err;
        }

        it->kaddr = kmap_atomic(it->page);
        it->ofs = erofs_blkoff(it->ofs);
        return 0;
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
                                   struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
        unsigned int xattr_header_sz, inline_xattr_ofs;

        xattr_header_sz = inlinexattr_header_size(inode);
        if (xattr_header_sz >= vi->xattr_isize) {
                DBG_BUGON(xattr_header_sz > vi->xattr_isize);
                return -ENOATTR;
        }

        inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

        it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
        it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

        it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
        if (IS_ERR(it->page))
                return PTR_ERR(it->page);

        it->kaddr = kmap_atomic(it->page);
        return vi->xattr_isize - xattr_header_sz;
}
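/*
 * Note that on success the return value above is the number of inline
 * xattr bytes remaining, which callers feed back into xattr_foreach()
 * as `tlimit'.
 */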
/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
                         const struct xattr_iter_handlers *op,
                         unsigned int *tlimit)
{
        struct erofs_xattr_entry entry;
        unsigned int value_sz, processed, slice;
        int err;
        /* 0. fixup blkaddr, ofs, ipage */
        err = xattr_iter_fixup(it);
        if (err)
                return err;

        /*
         * 1. read xattr entry to the memory;
         *    since entries are EROFS_XATTR_ALIGN()ed, the whole entry
         *    must reside within the current page
         */
        entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
        if (tlimit) {
                unsigned int entry_sz = erofs_xattr_entry_size(&entry);

                /* xattr on-disk corruption: xattr entry beyond xattr_isize */
                if (*tlimit < entry_sz) {
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                *tlimit -= entry_sz;
        }
        it->ofs += sizeof(struct erofs_xattr_entry);
        value_sz = le16_to_cpu(entry.e_value_size);

        /* handle entry */
        err = op->entry(it, &entry);
        if (err) {
                it->ofs += entry.e_name_len + value_sz;
                goto out;
        }
        /* 2. handle xattr name (ofs will finally be at the end of name) */
        processed = 0;

        while (processed < entry.e_name_len) {
                if (it->ofs >= EROFS_BLKSIZ) {
                        /* cannot be unaligned */
                        DBG_BUGON(it->ofs > EROFS_BLKSIZ);

                        err = xattr_iter_fixup(it);
                        if (err)
                                goto out;
                        it->ofs = 0;
                }

                slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
                              entry.e_name_len - processed);

                /* handle name */
                err = op->name(it, processed, it->kaddr + it->ofs, slice);
                if (err) {
                        it->ofs += entry.e_name_len - processed + value_sz;
                        goto out;
                }

                it->ofs += slice;
                processed += slice;
        }
        /* 3. handle xattr value */
        processed = 0;

        if (op->alloc_buffer) {
                err = op->alloc_buffer(it, value_sz);
                if (err) {
                        it->ofs += value_sz;
                        goto out;
                }
        }

        while (processed < value_sz) {
                if (it->ofs >= EROFS_BLKSIZ) {
                        /* cannot be unaligned */
                        DBG_BUGON(it->ofs > EROFS_BLKSIZ);

                        err = xattr_iter_fixup(it);
                        if (err)
                                goto out;
                        it->ofs = 0;
                }

                slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
                              value_sz - processed);
                op->value(it, processed, it->kaddr + it->ofs, slice);
                it->ofs += slice;
                processed += slice;
        }
out:
        /* xattrs should be 4-byte aligned (on-disk constraint) */
        it->ofs = EROFS_XATTR_ALIGN(it->ofs);
        return err < 0 ? err : 0;
}
struct getxattr_iter {
        struct xattr_iter it;

        char *buffer;
        int buffer_size, index;
        struct qstr name;
};
static int xattr_entrymatch(struct xattr_iter *_it,
                            struct erofs_xattr_entry *entry)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        return (it->index != entry->e_name_index ||
                it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}
static int xattr_namematch(struct xattr_iter *_it,
                           unsigned int processed, char *buf, unsigned int len)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}
static int xattr_checkbuffer(struct xattr_iter *_it,
                             unsigned int value_sz)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
        int err = it->buffer_size < value_sz ? -ERANGE : 0;

        it->buffer_size = value_sz;
        return !it->buffer ? 1 : err;
}
static void xattr_copyvalue(struct xattr_iter *_it,
                            unsigned int processed,
                            char *buf, unsigned int len)
{
        struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

        memcpy(it->buffer + processed, buf, len);
}
static const struct xattr_iter_handlers find_xattr_handlers = {
        .entry = xattr_entrymatch,
        .name = xattr_namematch,
        .alloc_buffer = xattr_checkbuffer,
        .value = xattr_copyvalue
};
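/*
 * find_xattr_handlers implements lookup: the entry/name matchers return
 * -ENOATTR on mismatch so xattr_foreach() moves on to the next entry,
 * and xattr_checkbuffer() returns 1 to probe the value size without
 * copying whenever no destination buffer was supplied.
 */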
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
        int ret;
        unsigned int remaining;

        ret = inline_xattr_iter_begin(&it->it, inode);
        if (ret < 0)
                return ret;

        remaining = ret;
        while (remaining) {
                ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
                if (ret != -ENOATTR)
                        break;
        }
        xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_size;
}
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        unsigned int i;
        int ret = -ENOATTR;

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                erofs_blk_t blkaddr =
                        xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

                it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

                if (!i || blkaddr != it->it.blkaddr) {
                        if (i)
                                xattr_iter_end(&it->it, true);

                        it->it.page = erofs_get_meta_page(sb, blkaddr);
                        if (IS_ERR(it->it.page))
                                return PTR_ERR(it->it.page);

                        it->it.kaddr = kmap_atomic(it->it.page);
                        it->it.blkaddr = blkaddr;
                }

                ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
                if (ret != -ENOATTR)
                        break;
        }
        if (vi->xattr_shared_count)
                xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_size;
}
static bool erofs_xattr_user_list(struct dentry *dentry)
{
        return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
}
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
        return capable(CAP_SYS_ADMIN);
}
int erofs_getxattr(struct inode *inode, int index,
                   const char *name,
                   void *buffer, size_t buffer_size)
{
        int ret;
        struct getxattr_iter it;

        if (!name)
                return -EINVAL;

        ret = init_inode_xattrs(inode);
        if (ret)
                return ret;

        it.index = index;

        it.name.len = strlen(name);
        if (it.name.len > EROFS_NAME_LEN)
                return -ERANGE;
        it.name.name = name;

        it.buffer = buffer;
        it.buffer_size = buffer_size;

        it.it.sb = inode->i_sb;
        ret = inline_getxattr(inode, &it);
        if (ret == -ENOATTR)
                ret = shared_getxattr(inode, &it);
        return ret;
}
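/*
 * Usage sketch (hypothetical name "mime_type" for illustration): a NULL
 * buffer queries the value size first, then a second call copies it:
 *
 *      len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER,
 *                           "mime_type", NULL, 0);
 *      if (len > 0)
 *              len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER,
 *                                   "mime_type", value, len);
 *
 * erofs_get_acl() below follows exactly this two-call pattern.
 */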
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
                                   struct dentry *unused, struct inode *inode,
                                   const char *name, void *buffer, size_t size)
{
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

        switch (handler->flags) {
        case EROFS_XATTR_INDEX_USER:
                if (!test_opt(&sbi->ctx, XATTR_USER))
                        return -EOPNOTSUPP;
                break;
        case EROFS_XATTR_INDEX_TRUSTED:
                break;
        case EROFS_XATTR_INDEX_SECURITY:
                break;
        default:
                return -EINVAL;
        }

        return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
const struct xattr_handler erofs_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags  = EROFS_XATTR_INDEX_USER,
        .list   = erofs_xattr_user_list,
        .get    = erofs_xattr_generic_get,
};
const struct xattr_handler erofs_xattr_trusted_handler = {
        .prefix = XATTR_TRUSTED_PREFIX,
        .flags  = EROFS_XATTR_INDEX_TRUSTED,
        .list   = erofs_xattr_trusted_list,
        .get    = erofs_xattr_generic_get,
};
#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .flags  = EROFS_XATTR_INDEX_SECURITY,
        .get    = erofs_xattr_generic_get,
};
#endif
const struct xattr_handler *erofs_xattr_handlers[] = {
        &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
#endif
        &erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
        &erofs_xattr_security_handler,
#endif
        NULL,
};
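/*
 * The VFS dispatches {get,list}xattr calls through this NULL-terminated
 * table, selecting a handler by name prefix ("user.", "trusted.", ...)
 * and passing its ->flags (the on-disk name index) back into the ->get
 * implementation above.
 */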
struct listxattr_iter {
        struct xattr_iter it;

        struct dentry *dentry;
        char *buffer;
        int buffer_size, buffer_ofs;
};
static int xattr_entrylist(struct xattr_iter *_it,
                           struct erofs_xattr_entry *entry)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);
        unsigned int prefix_len;
        const char *prefix;

        const struct xattr_handler *h =
                erofs_xattr_handler(entry->e_name_index);

        if (!h || (h->list && !h->list(it->dentry)))
                return 1;

        prefix = xattr_prefix(h);
        prefix_len = strlen(prefix);

        if (!it->buffer) {
                it->buffer_ofs += prefix_len + entry->e_name_len + 1;
                return 1;
        }

        if (it->buffer_ofs + prefix_len
                + entry->e_name_len + 1 > it->buffer_size)
                return -ERANGE;

        memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
        it->buffer_ofs += prefix_len;
        return 0;
}
static int xattr_namelist(struct xattr_iter *_it,
                          unsigned int processed, char *buf, unsigned int len)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);

        memcpy(it->buffer + it->buffer_ofs, buf, len);
        it->buffer_ofs += len;
        return 0;
}
static int xattr_skipvalue(struct xattr_iter *_it,
                           unsigned int value_sz)
{
        struct listxattr_iter *it =
                container_of(_it, struct listxattr_iter, it);

        it->buffer[it->buffer_ofs++] = '\0';
        return 1;
}
static const struct xattr_iter_handlers list_xattr_handlers = {
        .entry = xattr_entrylist,
        .name = xattr_namelist,
        .alloc_buffer = xattr_skipvalue,
        .value = NULL
};
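/*
 * Listing never copies values: xattr_skipvalue() terminates the name with
 * '\0' and returns 1 so that xattr_foreach() skips the value entirely,
 * hence no ->value callback is needed here.
 */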
static int inline_listxattr(struct listxattr_iter *it)
{
        int ret;
        unsigned int remaining;

        ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
        if (ret < 0)
                return ret;

        remaining = ret;
        while (remaining) {
                ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
                if (ret)
                        break;
        }
        xattr_iter_end_final(&it->it);
        return ret ? ret : it->buffer_ofs;
}
static int shared_listxattr(struct listxattr_iter *it)
{
        struct inode *const inode = d_inode(it->dentry);
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        struct erofs_sb_info *const sbi = EROFS_SB(sb);
        unsigned int i;
        int ret = 0;

        for (i = 0; i < vi->xattr_shared_count; ++i) {
                erofs_blk_t blkaddr =
                        xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

                it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
                if (!i || blkaddr != it->it.blkaddr) {
                        if (i)
                                xattr_iter_end(&it->it, true);

                        it->it.page = erofs_get_meta_page(sb, blkaddr);
                        if (IS_ERR(it->it.page))
                                return PTR_ERR(it->it.page);

                        it->it.kaddr = kmap_atomic(it->it.page);
                        it->it.blkaddr = blkaddr;
                }

                ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
                if (ret)
                        break;
        }
        if (vi->xattr_shared_count)
                xattr_iter_end_final(&it->it);

        return ret ? ret : it->buffer_ofs;
}
ssize_t erofs_listxattr(struct dentry *dentry,
                        char *buffer, size_t buffer_size)
{
        int ret;
        struct listxattr_iter it;

        ret = init_inode_xattrs(d_inode(dentry));
        if (ret == -ENOATTR)
                return 0;
        if (ret)
                return ret;

        it.dentry = dentry;
        it.buffer = buffer;
        it.buffer_size = buffer_size;
        it.buffer_ofs = 0;

        it.it.sb = dentry->d_sb;

        ret = inline_listxattr(&it);
        if (ret < 0 && ret != -ENOATTR)
                return ret;
        return shared_listxattr(&it);
}
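/*
 * Usage sketch: as with getxattr, a NULL `buffer' turns this into a size
 * query -- xattr_entrylist() then only accumulates buffer_ofs, so the
 * return value is the byte count a subsequent call needs.
 */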
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type)
{
        struct posix_acl *acl;
        void *value = NULL;
        int prefix, rc;

        switch (type) {
        case ACL_TYPE_ACCESS:
                prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
                break;
        case ACL_TYPE_DEFAULT:
                prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        rc = erofs_getxattr(inode, prefix, "", NULL, 0);
        if (rc > 0) {
                value = kmalloc(rc, GFP_KERNEL);
                if (!value)
                        return ERR_PTR(-ENOMEM);
                rc = erofs_getxattr(inode, prefix, "", value, rc);
        }

        if (rc == -ENOATTR)
                acl = NULL;
        else if (rc < 0)
                acl = ERR_PTR(rc);
        else
                acl = posix_acl_from_xattr(&init_user_ns, value, rc);
        kfree(value);
        return acl;
}
#endif