Linux 4.19.133
[linux/fpc-iii.git] / drivers / staging / erofs / xattr.c
blobd48687ca21990279a8bead27bf2b12d9524d2360
1 // SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/xattr.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
13 #include <linux/security.h>
14 #include "xattr.h"
/*
 * Cursor for walking on-disk xattr metadata one block at a time.
 * Exactly one meta page is pinned and mapped while iterating; the
 * mapping is torn down via xattr_iter_end()/xattr_iter_end_final().
 */
struct xattr_iter {
	struct super_block *sb;
	struct page *page;	/* currently pinned metadata page */
	void *kaddr;		/* kernel mapping of @page */

	erofs_blk_t blkaddr;	/* block address backing @page */
	unsigned ofs;		/* byte offset within the current block */
};
25 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
27 /* the only user of kunmap() is 'init_inode_xattrs' */
28 if (unlikely(!atomic))
29 kunmap(it->page);
30 else
31 kunmap_atomic(it->kaddr);
33 unlock_page(it->page);
34 put_page(it->page);
37 static inline void xattr_iter_end_final(struct xattr_iter *it)
39 if (!it->page)
40 return;
42 xattr_iter_end(it, true);
45 static int init_inode_xattrs(struct inode *inode)
47 struct erofs_vnode *const vi = EROFS_V(inode);
48 struct xattr_iter it;
49 unsigned i;
50 struct erofs_xattr_ibody_header *ih;
51 struct erofs_sb_info *sbi;
52 bool atomic_map;
53 int ret = 0;
55 /* the most case is that xattrs of this inode are initialized. */
56 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
57 return 0;
59 if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
60 return -ERESTARTSYS;
62 /* someone has initialized xattrs for us? */
63 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
64 goto out_unlock;
67 * bypass all xattr operations if ->xattr_isize is not greater than
68 * sizeof(struct erofs_xattr_ibody_header), in detail:
69 * 1) it is not enough to contain erofs_xattr_ibody_header then
70 * ->xattr_isize should be 0 (it means no xattr);
71 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
72 * undefined right now (maybe use later with some new sb feature).
74 if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
75 errln("xattr_isize %d of nid %llu is not supported yet",
76 vi->xattr_isize, vi->nid);
77 ret = -ENOTSUPP;
78 goto out_unlock;
79 } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
80 if (unlikely(vi->xattr_isize)) {
81 DBG_BUGON(1);
82 ret = -EIO;
83 goto out_unlock; /* xattr ondisk layout error */
85 ret = -ENOATTR;
86 goto out_unlock;
89 sbi = EROFS_I_SB(inode);
90 it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
91 it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
93 it.page = erofs_get_inline_page(inode, it.blkaddr);
94 if (IS_ERR(it.page)) {
95 ret = PTR_ERR(it.page);
96 goto out_unlock;
99 /* read in shared xattr array (non-atomic, see kmalloc below) */
100 it.kaddr = kmap(it.page);
101 atomic_map = false;
103 ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
105 vi->xattr_shared_count = ih->h_shared_count;
106 vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
107 sizeof(uint), GFP_KERNEL);
108 if (!vi->xattr_shared_xattrs) {
109 xattr_iter_end(&it, atomic_map);
110 ret = -ENOMEM;
111 goto out_unlock;
114 /* let's skip ibody header */
115 it.ofs += sizeof(struct erofs_xattr_ibody_header);
117 for (i = 0; i < vi->xattr_shared_count; ++i) {
118 if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
119 /* cannot be unaligned */
120 BUG_ON(it.ofs != EROFS_BLKSIZ);
121 xattr_iter_end(&it, atomic_map);
123 it.page = erofs_get_meta_page(inode->i_sb,
124 ++it.blkaddr, S_ISDIR(inode->i_mode));
125 if (IS_ERR(it.page)) {
126 kfree(vi->xattr_shared_xattrs);
127 vi->xattr_shared_xattrs = NULL;
128 ret = PTR_ERR(it.page);
129 goto out_unlock;
132 it.kaddr = kmap_atomic(it.page);
133 atomic_map = true;
134 it.ofs = 0;
136 vi->xattr_shared_xattrs[i] =
137 le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
138 it.ofs += sizeof(__le32);
140 xattr_iter_end(&it, atomic_map);
142 set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
144 out_unlock:
145 clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
146 return ret;
/*
 * Callback set consumed by xattr_foreach().  A non-zero return from
 * ->entry, ->name or ->alloc_buffer makes the walker skip the rest of
 * the current xattr (see their call sites for exact semantics).
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
	int (*name)(struct xattr_iter *, unsigned, char *, unsigned);
	int (*alloc_buffer)(struct xattr_iter *, unsigned);
	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
};
156 static inline int xattr_iter_fixup(struct xattr_iter *it)
158 if (it->ofs < EROFS_BLKSIZ)
159 return 0;
161 xattr_iter_end(it, true);
163 it->blkaddr += erofs_blknr(it->ofs);
164 it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
165 if (IS_ERR(it->page)) {
166 int err = PTR_ERR(it->page);
168 it->page = NULL;
169 return err;
172 it->kaddr = kmap_atomic(it->page);
173 it->ofs = erofs_blkoff(it->ofs);
174 return 0;
177 static int inline_xattr_iter_begin(struct xattr_iter *it,
178 struct inode *inode)
180 struct erofs_vnode *const vi = EROFS_V(inode);
181 struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
182 unsigned xattr_header_sz, inline_xattr_ofs;
184 xattr_header_sz = inlinexattr_header_size(inode);
185 if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
186 BUG_ON(xattr_header_sz > vi->xattr_isize);
187 return -ENOATTR;
190 inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
192 it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
193 it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
195 it->page = erofs_get_inline_page(inode, it->blkaddr);
196 if (IS_ERR(it->page))
197 return PTR_ERR(it->page);
199 it->kaddr = kmap_atomic(it->page);
200 return vi->xattr_isize - xattr_header_sz;
/*
 * Walk one on-disk xattr entry at the iterator's current position,
 * handing its header, name slices and value slices to @op in turn.
 *
 * @tlimit, when non-NULL, is the remaining byte budget of the inline
 * xattr area and is decremented by this entry's aligned size.
 *
 * A non-zero return from op->entry/op->name/op->alloc_buffer makes the
 * walker advance it->ofs past the remainder of the entry and return
 * that value; the offset is re-aligned on exit either way, so callers
 * can simply invoke this in a loop to enumerate consecutive entries.
 */
static int xattr_foreach(struct xattr_iter *it,
	const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory,
	 *    since we do EROFS_XATTR_ALIGN
	 *    therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit != NULL) {
		unsigned entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);

		BUG_ON(*tlimit < entry_sz);
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		/* not interested: skip over the name and value bytes */
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		/* the name may straddle a block boundary: feed it in slices */
		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			/* mismatch: skip the rest of the name plus the value */
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer != NULL) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			/* caller doesn't want the value: step over it */
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		/* the value may straddle a block boundary too */
		slice = min_t(unsigned, PAGE_SIZE - it->ofs,
			value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* we assume that ofs is aligned with 4 bytes */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err;
}
/* State for one erofs_getxattr() lookup, layered on a plain xattr_iter. */
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;		/* destination buffer (NULL = size query) */
	int buffer_size, index;	/* buffer capacity; wanted name index */
	struct qstr name;	/* wanted name (prefix already stripped) */
};
307 static int xattr_entrymatch(struct xattr_iter *_it,
308 struct erofs_xattr_entry *entry)
310 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
312 return (it->index != entry->e_name_index ||
313 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
316 static int xattr_namematch(struct xattr_iter *_it,
317 unsigned processed, char *buf, unsigned len)
319 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
321 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
324 static int xattr_checkbuffer(struct xattr_iter *_it,
325 unsigned value_sz)
327 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
328 int err = it->buffer_size < value_sz ? -ERANGE : 0;
330 it->buffer_size = value_sz;
331 return it->buffer == NULL ? 1 : err;
334 static void xattr_copyvalue(struct xattr_iter *_it,
335 unsigned processed, char *buf, unsigned len)
337 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
339 memcpy(it->buffer + processed, buf, len);
/* callback set wiring xattr_foreach() to the getxattr lookup above */
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
349 static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
351 int ret;
352 unsigned remaining;
354 ret = inline_xattr_iter_begin(&it->it, inode);
355 if (ret < 0)
356 return ret;
358 remaining = ret;
359 while (remaining) {
360 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
361 if (ret >= 0)
362 break;
364 if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
365 break;
367 xattr_iter_end_final(&it->it);
369 return ret < 0 ? ret : it->buffer_size;
372 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
374 struct erofs_vnode *const vi = EROFS_V(inode);
375 struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
376 unsigned i;
377 int ret = -ENOATTR;
379 for (i = 0; i < vi->xattr_shared_count; ++i) {
380 erofs_blk_t blkaddr =
381 xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
383 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
385 if (!i || blkaddr != it->it.blkaddr) {
386 if (i)
387 xattr_iter_end(&it->it, true);
389 it->it.page = erofs_get_meta_page(inode->i_sb,
390 blkaddr, false);
391 if (IS_ERR(it->it.page))
392 return PTR_ERR(it->it.page);
394 it->it.kaddr = kmap_atomic(it->it.page);
395 it->it.blkaddr = blkaddr;
398 ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
399 if (ret >= 0)
400 break;
402 if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
403 break;
405 if (vi->xattr_shared_count)
406 xattr_iter_end_final(&it->it);
408 return ret < 0 ? ret : it->buffer_size;
411 static bool erofs_xattr_user_list(struct dentry *dentry)
413 return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
416 static bool erofs_xattr_trusted_list(struct dentry *dentry)
418 return capable(CAP_SYS_ADMIN);
421 int erofs_getxattr(struct inode *inode, int index,
422 const char *name,
423 void *buffer, size_t buffer_size)
425 int ret;
426 struct getxattr_iter it;
428 if (unlikely(name == NULL))
429 return -EINVAL;
431 ret = init_inode_xattrs(inode);
432 if (ret)
433 return ret;
435 it.index = index;
437 it.name.len = strlen(name);
438 if (it.name.len > EROFS_NAME_LEN)
439 return -ERANGE;
440 it.name.name = name;
442 it.buffer = buffer;
443 it.buffer_size = buffer_size;
445 it.it.sb = inode->i_sb;
446 ret = inline_getxattr(inode, &it);
447 if (ret == -ENOATTR)
448 ret = shared_getxattr(inode, &it);
449 return ret;
452 static int erofs_xattr_generic_get(const struct xattr_handler *handler,
453 struct dentry *unused, struct inode *inode,
454 const char *name, void *buffer, size_t size)
456 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
458 switch (handler->flags) {
459 case EROFS_XATTR_INDEX_USER:
460 if (!test_opt(sbi, XATTR_USER))
461 return -EOPNOTSUPP;
462 break;
463 case EROFS_XATTR_INDEX_TRUSTED:
464 if (!capable(CAP_SYS_ADMIN))
465 return -EPERM;
466 break;
467 case EROFS_XATTR_INDEX_SECURITY:
468 break;
469 default:
470 return -EINVAL;
473 return erofs_getxattr(inode, handler->flags, name, buffer, size);
/* handler for the "user." xattr namespace */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};
/* handler for the "trusted." xattr namespace */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};
#ifdef CONFIG_EROFS_FS_SECURITY
/* handler for the "security." xattr namespace */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif
/* NULL-terminated list of xattr handlers registered on the superblock */
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
/* State for one erofs_listxattr() call, layered on a plain xattr_iter. */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;			/* output buffer (NULL = size query) */
	int buffer_size, buffer_ofs;	/* capacity; bytes emitted so far */
};
519 static int xattr_entrylist(struct xattr_iter *_it,
520 struct erofs_xattr_entry *entry)
522 struct listxattr_iter *it =
523 container_of(_it, struct listxattr_iter, it);
524 unsigned prefix_len;
525 const char *prefix;
527 const struct xattr_handler *h =
528 erofs_xattr_handler(entry->e_name_index);
530 if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
531 return 1;
533 /* Note that at least one of 'prefix' and 'name' should be non-NULL */
534 prefix = h->prefix != NULL ? h->prefix : h->name;
535 prefix_len = strlen(prefix);
537 if (it->buffer == NULL) {
538 it->buffer_ofs += prefix_len + entry->e_name_len + 1;
539 return 1;
542 if (it->buffer_ofs + prefix_len
543 + entry->e_name_len + 1 > it->buffer_size)
544 return -ERANGE;
546 memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
547 it->buffer_ofs += prefix_len;
548 return 0;
551 static int xattr_namelist(struct xattr_iter *_it,
552 unsigned processed, char *buf, unsigned len)
554 struct listxattr_iter *it =
555 container_of(_it, struct listxattr_iter, it);
557 memcpy(it->buffer + it->buffer_ofs, buf, len);
558 it->buffer_ofs += len;
559 return 0;
562 static int xattr_skipvalue(struct xattr_iter *_it,
563 unsigned value_sz)
565 struct listxattr_iter *it =
566 container_of(_it, struct listxattr_iter, it);
568 it->buffer[it->buffer_ofs++] = '\0';
569 return 1;
/* callback set wiring xattr_foreach() to the listxattr walk above */
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
579 static int inline_listxattr(struct listxattr_iter *it)
581 int ret;
582 unsigned remaining;
584 ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
585 if (ret < 0)
586 return ret;
588 remaining = ret;
589 while (remaining) {
590 ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
591 if (ret < 0)
592 break;
594 xattr_iter_end_final(&it->it);
595 return ret < 0 ? ret : it->buffer_ofs;
598 static int shared_listxattr(struct listxattr_iter *it)
600 struct inode *const inode = d_inode(it->dentry);
601 struct erofs_vnode *const vi = EROFS_V(inode);
602 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
603 unsigned i;
604 int ret = 0;
606 for (i = 0; i < vi->xattr_shared_count; ++i) {
607 erofs_blk_t blkaddr =
608 xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
610 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
611 if (!i || blkaddr != it->it.blkaddr) {
612 if (i)
613 xattr_iter_end(&it->it, true);
615 it->it.page = erofs_get_meta_page(inode->i_sb,
616 blkaddr, false);
617 if (IS_ERR(it->it.page))
618 return PTR_ERR(it->it.page);
620 it->it.kaddr = kmap_atomic(it->it.page);
621 it->it.blkaddr = blkaddr;
624 ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
625 if (ret < 0)
626 break;
628 if (vi->xattr_shared_count)
629 xattr_iter_end_final(&it->it);
631 return ret < 0 ? ret : it->buffer_ofs;
634 ssize_t erofs_listxattr(struct dentry *dentry,
635 char *buffer, size_t buffer_size)
637 int ret;
638 struct listxattr_iter it;
640 ret = init_inode_xattrs(d_inode(dentry));
641 if (ret == -ENOATTR)
642 return 0;
643 if (ret)
644 return ret;
646 it.dentry = dentry;
647 it.buffer = buffer;
648 it.buffer_size = buffer_size;
649 it.buffer_ofs = 0;
651 it.it.sb = dentry->d_sb;
653 ret = inline_listxattr(&it);
654 if (ret < 0 && ret != -ENOATTR)
655 return ret;
656 return shared_listxattr(&it);