#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/pagelist.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr);

static const struct xattr_handler ceph_other_xattr_handler;
/*
 * List of handlers for synthetic system.* attributes. Other
 * attributes are handled directly.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
#endif
        &ceph_other_xattr_handler,
        NULL,
};
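
/*
 * Only names in the ceph., security., trusted. and user. namespaces are
 * serviced by this module; everything else is rejected by the handlers
 * at the bottom of this file.
 */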
static bool ceph_is_valid_xattr(const char *name)
{
        return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
               !strncmp(name, XATTR_SECURITY_PREFIX,
                        XATTR_SECURITY_PREFIX_LEN) ||
               !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
               !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
        char *name;
        size_t name_size;       /* strlen(name) + 1 (for '\0') */
        size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
                              size_t size);
        bool readonly, hidden;
        bool (*exists_cb)(struct ceph_inode_info *ci);
};
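
/*
 * Layout vxattrs.  From userspace these read like ordinary xattrs,
 * e.g. (hypothetical mount point and file):
 *
 *      getfattr -n ceph.file.layout.pool /mnt/cephfs/somefile
 */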
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
        struct ceph_file_layout *fl = &ci->i_layout;

        return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
                fl->object_size > 0 || fl->pool_id >= 0 ||
                rcu_dereference_raw(fl->pool_ns) != NULL);
}
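
/*
 * Emit "stripe_unit=... stripe_count=... object_size=... pool=..."
 * (plus " pool_namespace=..." when one is set).  Passing size == 0 only
 * reports the length that would be written, matching getxattr(2)
 * size-probing semantics.
 */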
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_string *pool_ns;
        s64 pool = ci->i_layout.pool_id;
        const char *pool_name;
        const char *ns_field = " pool_namespace=";
        char buf[128];
        size_t len, total_len = 0;
        int ret;

        pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

        dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
        down_read(&osdc->lock);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name) {
                len = snprintf(buf, sizeof(buf),
                        "stripe_unit=%u stripe_count=%u object_size=%u pool=",
                        ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
                        ci->i_layout.object_size);
                total_len = len + strlen(pool_name);
        } else {
                len = snprintf(buf, sizeof(buf),
                        "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
                        ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
                        ci->i_layout.object_size, (unsigned long long)pool);
                total_len = len;
        }

        if (pool_ns)
                total_len += strlen(ns_field) + pool_ns->len;

        if (!size) {
                ret = total_len;
        } else if (total_len > size) {
                ret = -ERANGE;
        } else {
                memcpy(val, buf, len);
                ret = len;
                if (pool_name) {
                        len = strlen(pool_name);
                        memcpy(val + ret, pool_name, len);
                        ret += len;
                }
                if (pool_ns) {
                        len = strlen(ns_field);
                        memcpy(val + ret, ns_field, len);
                        ret += len;
                        memcpy(val + ret, pool_ns->str, pool_ns->len);
                        ret += pool_ns->len;
                }
        }
        up_read(&osdc->lock);
        ceph_put_string(pool_ns);
        return ret;
}
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
                                               char *val, size_t size)
{
        return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
}

static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
                                                char *val, size_t size)
{
        return snprintf(val, size, "%u", ci->i_layout.stripe_count);
}

static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
                                               char *val, size_t size)
{
        return snprintf(val, size, "%u", ci->i_layout.object_size);
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
                                        char *val, size_t size)
{
        int ret;
        struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        s64 pool = ci->i_layout.pool_id;
        const char *pool_name;

        down_read(&osdc->lock);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name)
                ret = snprintf(val, size, "%s", pool_name);
        else
                ret = snprintf(val, size, "%lld", (unsigned long long)pool);
        up_read(&osdc->lock);
        return ret;
}
static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
                                                  char *val, size_t size)
{
        int ret = 0;
        struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);

        if (ns) {
                ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
                ceph_put_string(ns);
        }
        return ret;
}
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
                                        size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
                                      size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
                                        size_t size)
{
        return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
                                         size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
                                         size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rbytes);
}
static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
                                       size_t size)
{
        return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
                        (long)ci->i_rctime.tv_nsec);
}
#define CEPH_XATTR_NAME(_type, _name)  XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
        XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name)                                   \
        {                                                               \
                .name = CEPH_XATTR_NAME(_type, _name),                  \
                .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),    \
                .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,   \
                .readonly = true,                                       \
                .hidden = false,                                        \
                .exists_cb = NULL,                                      \
        }
#define XATTR_LAYOUT_FIELD(_type, _name, _field)                        \
        {                                                               \
                .name = CEPH_XATTR_NAME2(_type, _name, _field),         \
                .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
                .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,  \
                .readonly = false,                                      \
                .hidden = true,                                         \
                .exists_cb = ceph_vxattrcb_layout_exists,               \
        }
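
/*
 * Per-inode vxattr tables.  Entries marked .hidden can be read by name
 * but are not reported by listxattr(); .exists_cb lets an entry vanish
 * when the underlying field is unset.
 */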
static struct ceph_vxattr ceph_dir_vxattrs[] = {
        {
                .name = "ceph.dir.layout",
                .name_size = sizeof("ceph.dir.layout"),
                .getxattr_cb = ceph_vxattrcb_layout,
                .readonly = false,
                .hidden = true,
                .exists_cb = ceph_vxattrcb_layout_exists,
        },
        XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
        XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
        XATTR_LAYOUT_FIELD(dir, layout, object_size),
        XATTR_LAYOUT_FIELD(dir, layout, pool),
        XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
        XATTR_NAME_CEPH(dir, entries),
        XATTR_NAME_CEPH(dir, files),
        XATTR_NAME_CEPH(dir, subdirs),
        XATTR_NAME_CEPH(dir, rentries),
        XATTR_NAME_CEPH(dir, rfiles),
        XATTR_NAME_CEPH(dir, rsubdirs),
        XATTR_NAME_CEPH(dir, rbytes),
        XATTR_NAME_CEPH(dir, rctime),
        { .name = NULL, 0 }     /* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;       /* total size of all names */
static struct ceph_vxattr ceph_file_vxattrs[] = {
        {
                .name = "ceph.file.layout",
                .name_size = sizeof("ceph.file.layout"),
                .getxattr_cb = ceph_vxattrcb_layout,
                .readonly = false,
                .hidden = true,
                .exists_cb = ceph_vxattrcb_layout_exists,
        },
        XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
        XATTR_LAYOUT_FIELD(file, layout, stripe_count),
        XATTR_LAYOUT_FIELD(file, layout, object_size),
        XATTR_LAYOUT_FIELD(file, layout, pool),
        XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
        { .name = NULL, 0 }     /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;      /* total size of all names */
static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode))
                return ceph_dir_vxattrs;
        else if (S_ISREG(inode->i_mode))
                return ceph_file_vxattrs;
        return NULL;
}
static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
        if (vxattrs == ceph_dir_vxattrs)
                return ceph_dir_vxattrs_name_size;
        if (vxattrs == ceph_file_vxattrs)
                return ceph_file_vxattrs_name_size;
        BUG_ON(vxattrs);
        return 0;
}
/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
        struct ceph_vxattr *vxattr;
        size_t size = 0;

        for (vxattr = vxattrs; vxattr->name; vxattr++)
                size += vxattr->name_size;

        return size;
}

/* Routines called at initialization and exit time */
void __init ceph_xattr_init(void)
{
        ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
        ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
        ceph_dir_vxattrs_name_size = 0;
        ceph_file_vxattrs_name_size = 0;
}
static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
                                             const char *name)
{
        struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

        if (vxattr) {
                while (vxattr->name) {
                        if (!strcmp(vxattr->name, name))
                                return vxattr;
                        vxattr++;
                }
        }

        return NULL;
}
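
/*
 * Insert or update one xattr in the per-inode rb-tree, keyed by name.
 * update_xattr > 0 marks the entry dirty and takes ownership of the
 * passed-in name/value buffers, update_xattr < 0 removes the entry, and
 * update_xattr == 0 is used when rebuilding the tree from the MDS blob.
 */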
static int __set_xattr(struct ceph_inode_info *ci,
                       const char *name, int name_len,
                       const char *val, int val_len,
                       int flags, int update_xattr,
                       struct ceph_inode_xattr **newxattr)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int c;
        int new = 0;

        p = &ci->i_xattrs.index.rb_node;
        while (*p) {
                parent = *p;
                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else {
                        if (name_len == xattr->name_len)
                                break;
                        else if (name_len < xattr->name_len)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
                xattr = NULL;
        }

        if (update_xattr) {
                int err = 0;

                if (xattr && (flags & XATTR_CREATE))
                        err = -EEXIST;
                else if (!xattr && (flags & XATTR_REPLACE))
                        err = -ENODATA;
                if (err) {
                        kfree(name);
                        kfree(val);
                        kfree(*newxattr);
                        return err;
                }
                if (update_xattr < 0) {
                        if (xattr)
                                __remove_xattr(ci, xattr);
                        kfree(name);
                        kfree(*newxattr);
                        return 0;
                }
        }

        if (!xattr) {
                new = 1;
                xattr = *newxattr;
                xattr->name = name;
                xattr->name_len = name_len;
                xattr->should_free_name = update_xattr;

                ci->i_xattrs.count++;
                dout("__set_xattr count=%d\n", ci->i_xattrs.count);
        } else {
                kfree(*newxattr);
                *newxattr = NULL;
                if (xattr->should_free_val)
                        kfree((void *)xattr->val);

                if (update_xattr) {
                        kfree((void *)name);
                        name = xattr->name;
                }
                ci->i_xattrs.names_size -= xattr->name_len;
                ci->i_xattrs.vals_size -= xattr->val_len;
        }
        ci->i_xattrs.names_size += name_len;
        ci->i_xattrs.vals_size += val_len;
        if (val)
                xattr->val = val;
        else
                xattr->val = "";

        xattr->val_len = val_len;
        xattr->dirty = update_xattr;
        xattr->should_free_val = (val && update_xattr);

        if (new) {
                rb_link_node(&xattr->node, parent, p);
                rb_insert_color(&xattr->node, &ci->i_xattrs.index);
                dout("__set_xattr_val p=%p\n", p);
        }

        dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
             ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

        return 0;
}
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                                            const char *name)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int name_len = strlen(name);
        int c;

        p = &ci->i_xattrs.index.rb_node;
        while (*p) {
                parent = *p;
                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, xattr->name_len);
                if (c == 0 && name_len > xattr->name_len)
                        c = 1;
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else {
                        dout("__get_xattr %s: found %.*s\n", name,
                             xattr->val_len, xattr->val);
                        return xattr;
                }
        }

        dout("__get_xattr %s: not found\n", name);

        return NULL;
}
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
        BUG_ON(!xattr);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        kfree(xattr);
}
static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr)
{
        if (!xattr)
                return -ENODATA;

        rb_erase(&xattr->node, &ci->i_xattrs.index);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        ci->i_xattrs.names_size -= xattr->name_len;
        ci->i_xattrs.vals_size -= xattr->val_len;
        ci->i_xattrs.count--;
        kfree(xattr);

        return 0;
}
static char *__copy_xattr_names(struct ceph_inode_info *ci,
                                char *dest)
{
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);
        dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                memcpy(dest, xattr->name, xattr->name_len);
                dest[xattr->name_len] = '\0';

                dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
                     xattr->name_len, ci->i_xattrs.names_size);

                dest += xattr->name_len + 1;
                p = rb_next(p);
        }

        return dest;
}
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
        struct rb_node *p, *tmp;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);

        dout("__ceph_destroy_xattrs p=%p\n", p);

        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                tmp = p;
                p = rb_next(tmp);
                dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
                     xattr->name_len, xattr->name);
                rb_erase(tmp, &ci->i_xattrs.index);

                __free_xattr(xattr);
        }

        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.index_version = 0;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.index = RB_ROOT;
}
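
/*
 * Rebuild the in-memory rb-tree from the xattr blob received from the
 * MDS.  i_ceph_lock is dropped around the allocations; if another
 * update changes the xattr version in the meantime, the decode restarts.
 */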
static int __build_xattrs(struct inode *inode)
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
{
        u32 namelen;
        u32 numattr = 0;
        void *p, *end;
        u32 len;
        const char *name, *val;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int xattr_version;
        struct ceph_inode_xattr **xattrs = NULL;
        int err = 0;
        int i;

        dout("__build_xattrs() len=%d\n",
             ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

        if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
                return 0; /* already built */

        __ceph_destroy_xattrs(ci);

start:
        /* updated internal xattr rb tree */
        if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
                p = ci->i_xattrs.blob->vec.iov_base;
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
                spin_unlock(&ci->i_ceph_lock);

                xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
                                 GFP_NOFS);
                err = -ENOMEM;
                if (!xattrs)
                        goto bad_lock;

                for (i = 0; i < numattr; i++) {
                        xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
                                            GFP_NOFS);
                        if (!xattrs[i])
                                goto bad_lock;
                        memset(xattrs[i], 0, sizeof(struct ceph_inode_xattr));
                }

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
                                kfree(xattrs[i]);
                        kfree(xattrs);
                        xattrs = NULL;
                        goto start;
                }
                err = -EIO;
                while (numattr--) {
                        len = 0;
                        ceph_decode_32_safe(&p, end, len, bad);
                        namelen = len;
                        name = p;
                        p += len;
                        ceph_decode_32_safe(&p, end, len, bad);
                        val = p;
                        p += len;

                        err = __set_xattr(ci, name, namelen, val, len,
                                          0, 0, &xattrs[numattr]);
                        if (err < 0)
                                goto bad;
                }
                kfree(xattrs);
        }
        ci->i_xattrs.index_version = ci->i_xattrs.version;
        ci->i_xattrs.dirty = false;

        return err;
bad_lock:
        spin_lock(&ci->i_ceph_lock);
bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
                        kfree(xattrs[i]);
                kfree(xattrs);
        }
        ci->i_xattrs.names_size = 0;
        return err;
}
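
/*
 * For example, two cached xattrs with names_size = 12 and
 * vals_size = 20 need 4 + 2 * (4 + 4) + 12 + 20 = 52 bytes.
 */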
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
                                    int val_size)
{
        /*
         * 4 bytes for the length, and additional 4 bytes per each xattr name,
         * 4 bytes per each value
         */
        int size = 4 + ci->i_xattrs.count*(4 + 4) +
                     ci->i_xattrs.names_size +
                     ci->i_xattrs.vals_size;
        dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
             ci->i_xattrs.count, ci->i_xattrs.names_size,
             ci->i_xattrs.vals_size);

        if (name_size)
                size += 4 + 4 + name_size + val_size;

        return size;
}
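
/*
 * Blob layout (mirrors __get_required_blob_size() above): a __le32
 * xattr count, then for each xattr a __le32 name length, the name
 * bytes, a __le32 value length, and the value bytes.
 */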
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;
        void *dest;

        dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);

                BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

                p = rb_first(&ci->i_xattrs.index);
                dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

                ceph_encode_32(&dest, ci->i_xattrs.count);
                while (p) {
                        xattr = rb_entry(p, struct ceph_inode_xattr, node);

                        ceph_encode_32(&dest, xattr->name_len);
                        memcpy(dest, xattr->name, xattr->name_len);
                        dest += xattr->name_len;
                        ceph_encode_32(&dest, xattr->val_len);
                        memcpy(dest, xattr->val, xattr->val_len);
                        dest += xattr->val_len;

                        p = rb_next(p);
                }

                /* adjust buffer len; it may be larger than we need */
                ci->i_xattrs.prealloc_blob->vec.iov_len =
                        dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
                ci->i_xattrs.version++;
        }
}
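
/*
 * If the current context is processing an MDS trace reply
 * (current->journal_info points at the request), return the attribute
 * mask that request asked for; __ceph_getxattr() uses it to decide
 * whether a synchronous getattr can be skipped.
 */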
static inline int __get_request_mask(struct inode *in) {
        struct ceph_mds_request *req = current->journal_info;
        int mask = 0;
        if (req && req->r_target_inode == in) {
                if (req->r_op == CEPH_MDS_OP_LOOKUP ||
                    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
                    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
                    req->r_op == CEPH_MDS_OP_GETATTR) {
                        mask = le32_to_cpu(req->r_args.getattr.mask);
                } else if (req->r_op == CEPH_MDS_OP_OPEN ||
                           req->r_op == CEPH_MDS_OP_CREATE) {
                        mask = le32_to_cpu(req->r_args.open.mask);
                }
        }
        return mask;
}
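
/*
 * getxattr: try a virtual xattr first, then the cached rb-tree.  If we
 * do not hold CEPH_CAP_XATTR_SHARED (and the in-flight request did not
 * already ask for xattrs), fetch them from the MDS before looking up.
 */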
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
                        size_t size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_xattr *xattr;
        struct ceph_vxattr *vxattr = NULL;
        int req_mask;
        int err;

        /* let's see if a virtual xattr was requested */
        vxattr = ceph_match_vxattr(inode, name);
        if (vxattr) {
                err = -ENODATA;
                if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
                        err = vxattr->getxattr_cb(ci, value, size);
                return err;
        }

        req_mask = __get_request_mask(inode);

        spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (ci->i_xattrs.version == 0 ||
            !((req_mask & CEPH_CAP_XATTR_SHARED) ||
              __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
                spin_unlock(&ci->i_ceph_lock);

                /* security module gets xattr while filling trace */
                if (current->journal_info != NULL) {
                        pr_warn_ratelimited("sync getxattr %p "
                                            "during filling trace\n", inode);
                        return -EBUSY;
                }

                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
                if (err)
                        return err;
                spin_lock(&ci->i_ceph_lock);
        }

        err = __build_xattrs(inode);
        if (err < 0)
                goto out;

        err = -ENODATA;  /* == ENOATTR */
        xattr = __get_xattr(ci, name);
        if (!xattr)
                goto out;

        err = -ERANGE;
        if (size && size < xattr->val_len)
                goto out;

        err = xattr->val_len;
        if (size == 0)
                goto out;

        memcpy(value, xattr->val, xattr->val_len);

        if (current->journal_info != NULL &&
            !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
                ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
        spin_unlock(&ci->i_ceph_lock);
        return err;
}
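
/*
 * listxattr: real xattr names first, then any non-hidden vxattrs that
 * currently exist.  With size == 0 only the required buffer length is
 * reported.
 */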
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
        struct inode *inode = d_inode(dentry);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
        u32 vir_namelen = 0;
        u32 namelen;
        int err;
        u32 len;
        int i;

        spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (ci->i_xattrs.version == 0 ||
            !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
                spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
                if (err)
                        return err;
                spin_lock(&ci->i_ceph_lock);
        }

        err = __build_xattrs(inode);
        if (err < 0)
                goto out;
        /*
         * Start with virtual dir xattr names (if any) (including
         * terminating '\0' characters for each).
         */
        vir_namelen = ceph_vxattrs_name_size(vxattrs);

        /* adding 1 byte per each variable due to the null termination */
        namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
        err = -ERANGE;
        if (size && vir_namelen + namelen > size)
                goto out;

        err = namelen + vir_namelen;
        if (size == 0)
                goto out;

        names = __copy_xattr_names(ci, names);

        /* virtual xattr names, too */
        err = namelen;
        if (vxattrs) {
                for (i = 0; vxattrs[i].name; i++) {
                        if (!vxattrs[i].hidden &&
                            !(vxattrs[i].exists_cb &&
                              !vxattrs[i].exists_cb(ci))) {
                                len = sprintf(names, "%s", vxattrs[i].name);
                                names += len + 1;
                                err += len + 1;
                        }
                }
        }

out:
        spin_unlock(&ci->i_ceph_lock);
        return err;
}
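
/*
 * Send an xattr change synchronously to the MDS.  A NULL value with
 * CEPH_XATTR_REPLACE set becomes a RMXATTR op; a NULL value without it
 * is signalled via CEPH_XATTR_REMOVE.  The value itself travels in a
 * pagelist attached to the request.
 */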
static int ceph_sync_setxattr(struct inode *inode, const char *name,
                              const char *value, size_t size, int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_pagelist *pagelist = NULL;
        int op = CEPH_MDS_OP_SETXATTR;
        int err;

        if (size > 0) {
                /* copy value into pagelist */
                pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
                if (!pagelist)
                        return -ENOMEM;

                ceph_pagelist_init(pagelist);
                err = ceph_pagelist_append(pagelist, value, size);
                if (err)
                        goto out;
        } else if (!value) {
                if (flags & CEPH_XATTR_REPLACE)
                        op = CEPH_MDS_OP_RMXATTR;
                else
                        flags |= CEPH_XATTR_REMOVE;
        }

        dout("setxattr value=%.*s\n", (int)size, value);

        /* do request */
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_path2 = kstrdup(name, GFP_NOFS);
        if (!req->r_path2) {
                ceph_mdsc_put_request(req);
                err = -ENOMEM;
                goto out;
        }

        if (op == CEPH_MDS_OP_SETXATTR) {
                req->r_args.setxattr.flags = cpu_to_le32(flags);
                req->r_pagelist = pagelist;
                pagelist = NULL;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

        dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
        dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
        if (pagelist)
                ceph_pagelist_release(pagelist);
        return err;
}
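
/*
 * setxattr: when CEPH_CAP_XATTR_EXCL is held, the change is applied to
 * the local rb-tree and flushed later with the dirty caps; otherwise
 * (or for unhandled ceph.* names) it is sent synchronously to the MDS.
 */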
int __ceph_setxattr(struct inode *inode, const char *name,
                    const void *value, size_t size, int flags)
{
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_cap_flush *prealloc_cf = NULL;
        int issued;
        int err;
        int dirty = 0;
        int name_len = strlen(name);
        int val_len = size;
        char *newname = NULL;
        char *newval = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int required_blob_size;
        bool lock_snap_rwsem = false;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        vxattr = ceph_match_vxattr(inode, name);
        if (vxattr && vxattr->readonly)
                return -EOPNOTSUPP;

        /* pass any unhandled ceph.* xattrs through to the MDS */
        if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
                goto do_sync_unlocked;

        /* preallocate memory for xattr name, value, index node */
        err = -ENOMEM;
        newname = kmemdup(name, name_len + 1, GFP_NOFS);
        if (!newname)
                goto out;

        if (val_len) {
                newval = kmemdup(value, val_len, GFP_NOFS);
                if (!newval)
                        goto out;
        }

        xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
        if (!xattr)
                goto out;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                goto out;

        spin_lock(&ci->i_ceph_lock);
retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
                goto do_sync;

        if (!lock_snap_rwsem && !ci->i_head_snapc) {
                lock_snap_rwsem = true;
                if (!down_read_trylock(&mdsc->snap_rwsem)) {
                        spin_unlock(&ci->i_ceph_lock);
                        down_read(&mdsc->snap_rwsem);
                        spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }
        }

        dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        __build_xattrs(inode);

        required_blob_size = __get_required_blob_size(ci, name_len, val_len);

        if (!ci->i_xattrs.prealloc_blob ||
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob;

                spin_unlock(&ci->i_ceph_lock);
                dout(" preallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto do_sync_unlocked;
                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
                goto retry;
        }

        err = __set_xattr(ci, newname, name_len, newval, val_len,
                          flags, value ? 1 : -1, &xattr);

        if (!err) {
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
                                               &prealloc_cf);
                ci->i_xattrs.dirty = true;
                inode->i_ctime = current_time(inode);
        }

        spin_unlock(&ci->i_ceph_lock);
        if (lock_snap_rwsem)
                up_read(&mdsc->snap_rwsem);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        ceph_free_cap_flush(prealloc_cf);
        return err;

do_sync:
        spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
        if (lock_snap_rwsem)
                up_read(&mdsc->snap_rwsem);

        /* security module set xattr while filling trace */
        if (current->journal_info != NULL) {
                pr_warn_ratelimited("sync setxattr %p "
                                    "during filling trace\n", inode);
                err = -EBUSY;
        } else {
                err = ceph_sync_setxattr(inode, name, value, size, flags);
        }
out:
        ceph_free_cap_flush(prealloc_cf);
        kfree(newname);
        kfree(newval);
        kfree(xattr);
        return err;
}
static int ceph_get_xattr_handler(const struct xattr_handler *handler,
                                  struct dentry *dentry, struct inode *inode,
                                  const char *name, void *value, size_t size)
{
        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;
        return __ceph_getxattr(inode, name, value, size);
}

static int ceph_set_xattr_handler(const struct xattr_handler *handler,
                                  struct dentry *unused, struct inode *inode,
                                  const char *name, const void *value,
                                  size_t size, int flags)
{
        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;
        return __ceph_setxattr(inode, name, value, size, flags);
}

static const struct xattr_handler ceph_other_xattr_handler = {
        .prefix = "",  /* match any name => handlers called with full name */
        .get = ceph_get_xattr_handler,
        .set = ceph_set_xattr_handler,
};
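
/*
 * LSM integration helpers: ceph_security_xattr_wanted() reports whether
 * security xattrs are wanted at all, and ceph_security_xattr_deadlock()
 * reports whether the security xattr has not been initialized yet and
 * cannot be served from the cache, i.e. fetching it would need a
 * synchronous MDS request.
 */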
#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
        return in->i_security != NULL;
}

bool ceph_security_xattr_deadlock(struct inode *in)
{
        struct ceph_inode_info *ci;
        bool ret;

        if (in->i_security == NULL)
                return false;
        ci = ceph_inode(in);
        spin_lock(&ci->i_ceph_lock);
        ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
              !(ci->i_xattrs.version > 0 &&
                __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
        spin_unlock(&ci->i_ceph_lock);
        return ret;
}
#endif