// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/xattr.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include <linux/security.h>
#include "xattr.h"

struct xattr_iter {
	struct super_block *sb;
	struct page *page;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned ofs;
};
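
/*
 * xattr_iter is a cursor over the on-disk xattr area: 'blkaddr' names the
 * current metadata block, 'ofs' is the byte offset inside it, and 'page'/
 * 'kaddr' hold the mapping of that block.  The walkers below bump 'ofs' and
 * rely on xattr_iter_fixup() once it runs past EROFS_BLKSIZ.
 */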

static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
	/* the only user of kunmap() is 'init_inode_xattrs' */
	if (unlikely(!atomic))
		kunmap(it->page);
	else
		kunmap_atomic(it->kaddr);

	unlock_page(it->page);
	put_page(it->page);
}

static inline void xattr_iter_end_final(struct xattr_iter *it)
{
	if (!it->page)
		return;

	xattr_iter_end(it, true);
}

static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct xattr_iter it;
	unsigned i;
	struct erofs_xattr_ibody_header *ih;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		return 0;

	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		errln("xattr_isize %d of nid %llu is not supported yet",
		      vi->xattr_isize, vi->nid);
		ret = -ENOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (unlikely(vi->xattr_isize)) {
			ret = -EIO;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sbi = EROFS_I_SB(inode);
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_inline_page(inode, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
			/* cannot be unaligned */
			BUG_ON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			it.page = erofs_get_meta_page(inode->i_sb,
				++it.blkaddr, S_ISDIR(inode->i_mode));
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
	return ret;
}
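
/*
 * Rough in-inode xattr layout as implied by init_inode_xattrs() above (the
 * authoritative field definitions live in the on-disk format headers):
 *
 *   iloc(sbi, nid) + inode_isize
 *     +- struct erofs_xattr_ibody_header   (h_shared_count, ...)
 *     +- __le32 shared xattr ids[h_shared_count]
 *     +- inline struct erofs_xattr_entry records, each 4-byte aligned
 */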

struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
	int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
	int (*alloc_buffer)(struct xattr_iter *, unsigned);
	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
};
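
/*
 * Callback contract used by xattr_foreach() below, as exercised by the two
 * handler sets in this file:
 *   entry()        - inspect an erofs_xattr_entry; a non-zero return skips
 *                    the rest of this xattr (negative values mean an error).
 *   name()         - consume a slice of the name; may be called repeatedly
 *                    when the name crosses a block boundary.
 *   alloc_buffer() - prepare for 'value_sz' bytes of value; a non-zero
 *                    return skips value copying.
 *   value()        - consume a slice of the value, split like name().
 */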

static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);
	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
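
/*
 * xattr_iter_fixup() is the only place a walk crosses into the next metadata
 * block: the previous page is unmapped and released first, and on failure
 * 'it->page' is left NULL so that xattr_iter_end_final() becomes a no-op.
 */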

static int inline_xattr_iter_begin(struct xattr_iter *it,
	struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
		BUG_ON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_inline_page(inode, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}
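
/*
 * inline_xattr_iter_begin() maps the block holding the inline xattr area and
 * returns the number of inline xattr bytes left to parse; callers use that
 * value as the shrinking 'remaining'/'tlimit' budget for xattr_foreach().
 */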

static int xattr_foreach(struct xattr_iter *it,
	const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory,
	 *    since we do EROFS_XATTR_ALIGN
	 *    therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit != NULL) {
		unsigned entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);

		BUG_ON(*tlimit < entry_sz);
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer != NULL) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* we assume that ofs is aligned with 4 bytes */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err;
}
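
/*
 * xattr_foreach() parses exactly one on-disk xattr record in three phases
 * (entry header, name, value) and finally re-aligns 'ofs' with
 * EROFS_XATTR_ALIGN().  Illustrative sizing only: a record with
 * e_name_len == 5 and e_value_size == 12 advances 'ofs' by
 * sizeof(struct erofs_xattr_entry) plus 17 payload bytes, and the result is
 * then rounded up to the next 4-byte boundary before the next record starts
 * (per the "aligned with 4 bytes" assumption noted above).
 */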

struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
	struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
	unsigned value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return it->buffer == NULL ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
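
/*
 * Lookup path: xattr_entrymatch() filters on index and name length,
 * xattr_namematch() compares the name slice by slice, xattr_checkbuffer()
 * validates (or, with a NULL buffer, merely reports) the destination size,
 * and xattr_copyvalue() fills the caller's buffer.  A -ENOATTR from any
 * step simply moves the search on to the next candidate entry.
 */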

static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret >= 0)
			break;

		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
			break;
	}
	xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_size;
}

static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(inode->i_sb,
				blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret >= 0)
			break;

		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

int erofs_getxattr(struct inode *inode, int index,
	const char *name,
	void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (unlikely(name == NULL))
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;

	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	return ret;
}
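
/*
 * erofs_getxattr() is the common back end: it tries the inline xattr area
 * first and falls back to the shared xattr blocks on -ENOATTR.  From user
 * space the path is the usual one, e.g. (illustrative only):
 *
 *	char buf[64];
 *	ssize_t n = getxattr("/mnt/erofs/file", "user.foo", buf, sizeof(buf));
 *
 * which the VFS routes through erofs_xattr_generic_get() below with
 * handler->flags == EROFS_XATTR_INDEX_USER and name == "foo" (the prefix is
 * stripped by the generic xattr layer before the handler is called).
 */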

static int erofs_xattr_generic_get(const struct xattr_handler *handler,
	struct dentry *unused, struct inode *inode,
	const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
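
/*
 * NULL-terminated handler table consumed by the VFS for get/list requests;
 * it is expected to be wired up as sb->s_xattr during superblock setup
 * (outside this file).
 */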

struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
	struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
		return 1;

	/* Note that at least one of 'prefix' and 'name' should be non-NULL */
	prefix = h->prefix != NULL ? h->prefix : h->name;
	prefix_len = strlen(prefix);

	if (it->buffer == NULL) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
	unsigned processed, char *buf, unsigned len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
	unsigned value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
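
/*
 * The list handlers emit the conventional listxattr byte stream: for every
 * listable xattr, "<prefix><name>\0" is appended and the value is skipped,
 * so e.g. user.foo plus trusted.bar yields "user.foo\0trusted.bar\0" with
 * buffer_ofs tracking the total length.
 */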

static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret < 0)
			break;
	}
	xattr_iter_end_final(&it->it);
	return ret < 0 ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
	unsigned i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(inode->i_sb,
				blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret < 0)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret < 0 ? ret : it->buffer_ofs;
}

ssize_t erofs_listxattr(struct dentry *dentry,
	char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret)
		return ret;

	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret < 0 && ret != -ENOATTR)
		return ret;
	return shared_listxattr(&it);
}
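
/*
 * As with getxattr, listing walks the inline area first and then the shared
 * xattrs; with a NULL 'buffer', xattr_entrylist() only accumulates name
 * lengths, so the return value is the buffer size a caller should allocate.
 */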