#include <linux/ceph/ceph_debug.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/slab.h>

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, "ceph.", 5) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
	bool readonly;
	char *name;
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
};

/* directories */
static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
				  size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}
static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
	{ true, "ceph.dir.entries", ceph_vxattrcb_entries },
	{ true, "ceph.dir.files", ceph_vxattrcb_files },
	{ true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs },
	{ true, "ceph.dir.rentries", ceph_vxattrcb_rentries },
	{ true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles },
	{ true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs },
	{ true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes },
	{ true, "ceph.dir.rctime", ceph_vxattrcb_rctime },
	{ true, NULL, NULL }
};
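/*
 * The table above is reached through the ordinary xattr syscalls; the
 * values are generated on the fly from the in-core inode statistics by
 * the callbacks.  As a usage sketch (mount point and path below are
 * hypothetical):
 *
 *   getfattr -n ceph.dir.rbytes /mnt/cephfs/some/dir
 *
 * would report the recursive byte count the MDS maintains for that
 * directory.
 */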
/* files */

static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	int ret;

	ret = snprintf(val, size,
		"chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
	if (ceph_file_layout_pg_preferred(ci->i_layout))
		ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
			(unsigned long long)ceph_file_layout_pg_preferred(
				ci->i_layout));
	return ret;
}
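/*
 * Example of what the callback above produces (the numbers are made up
 * and simply mirror a default-looking layout):
 *
 *   chunk_bytes=4194304
 *   stripe_count=1
 *   object_size=4194304
 *
 * with an extra "preferred_osd=..." line only when the layout pins a
 * preferred OSD.
 */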
static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
	{ true, "ceph.file.layout", ceph_vxattrcb_layout },
	/* The following extended attribute name is deprecated */
	{ true, "ceph.layout", ceph_vxattrcb_layout },
	{ true, NULL, NULL }
};
static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
						const char *name)
{
	do {
		if (strcmp(vxattr->name, name) == 0)
			return vxattr;
		vxattr++;
	} while (vxattr->name);

	return NULL;
}
static int __set_xattr(struct ceph_inode_info *ci,
			   const char *name, int name_len,
			   const char *val, int val_len,
			   int dirty,
			   int should_free_name, int should_free_val,
			   struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (should_free_name) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}
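/*
 * Note on __set_xattr() above: xattrs live in an rb-tree keyed by name.
 * The comparison looks at the raw bytes over the shorter of the two
 * lengths and then uses the name length as a tie-breaker, so e.g.
 * "user.a" and "user.ab" (hypothetical names) occupy distinct,
 * well-ordered nodes.
 */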
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}
static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -EOPNOTSUPP;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct rb_node **p;
	struct ceph_inode_xattr *xattr;
	int err;

	p = &ci->i_xattrs.index.rb_node;
	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}
static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;
		memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
			memset(xattrs[i], 0, sizeof(*xattrs[i]));
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}
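/*
 * __build_xattrs() has to drop i_ceph_lock while it allocates the node
 * array with GFP_NOFS.  It snapshots i_xattrs.version beforehand; if the
 * version has moved on by the time the lock is retaken, another update
 * raced in, so the allocations are thrown away and decoding restarts.
 */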
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		     ci->i_xattrs.names_size +
		     ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
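/*
 * Worked example for the estimate above (numbers are illustrative only):
 * two cached xattrs whose names total 16 bytes and values total 40 bytes
 * need 4 + 2*(4 + 4) + 16 + 40 = 76 bytes; adding one more xattr with an
 * 8-byte name and a 5-byte value adds 4 + 4 + 8 + 5 = 21 bytes on top.
 */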
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
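/*
 * The blob written above has the following layout (u32 values are
 * little-endian, as produced by ceph_encode_32()):
 *
 *   u32 count
 *   repeated count times:
 *     u32 name_len, name bytes (no trailing NUL)
 *     u32 val_len,  value bytes
 *
 * which is the same format __build_xattrs() decodes.
 */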
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr_cb *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	if (vxattrs)
		vxattr = ceph_match_vxattr(vxattrs, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto get_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
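/*
 * Cap handling in ceph_getxattr() above: if CEPH_CAP_XATTR_SHARED is
 * issued and the decoded index is current, the cached rb-tree answers
 * the lookup locally; otherwise ceph_do_getattr() fetches
 * CEPH_STAT_CAP_XATTR from the MDS and the blob is (re)decoded by
 * __build_xattrs() before the name is looked up.
 */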
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto list_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	vir_namelen = 0;
	/* include virtual dir xattrs */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++)
			vir_namelen += strlen(vxattrs[i].name) + 1;
	/* adding 1 byte per each variable due to the null termination */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
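/*
 * The buffer filled in above follows the usual listxattr(2) convention:
 * NUL-terminated names simply concatenated back to back, e.g.
 * (illustrative content only)
 *
 *   "user.comment\0ceph.dir.entries\0ceph.dir.files\0..."
 *
 * with the real xattr names first and the virtual ones appended.
 */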
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int err;
	int i, nr_pages;
	struct page **pages = NULL;
	void *kaddr;

	/* copy value into some pages */
	nr_pages = calc_pages_for(0, size);
	if (nr_pages) {
		pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = -ENOMEM;
		for (i = 0; i < nr_pages; i++) {
			pages[i] = __page_cache_alloc(GFP_NOFS);
			if (!pages[i]) {
				nr_pages = i;
				goto out;
			}
			kaddr = kmap(pages[i]);
			memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
			       min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
		}
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_args.setxattr.flags = cpu_to_le32(flags);
	req->r_path2 = kstrdup(name, GFP_NOFS);

	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_data_len = size;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pages) {
		for (i = 0; i < nr_pages; i++)
			__free_page(pages[i]);
		kfree(pages);
	}
	return err;
}
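/*
 * The value above is shipped to the MDS in whole pages:
 * calc_pages_for(0, size) rounds up, so with the common 4096-byte
 * PAGE_CACHE_SIZE (an assumption, it is architecture dependent) a
 * 10000-byte value lands in 3 pages, the last one only partly used.
 */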
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int issued;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmalloc(val_len + 1, GFP_NOFS);
		if (!newval)
			goto out;
		memcpy(newval, value, val_len);
		newval[val_len] = '\0';
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob = NULL;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
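/*
 * ceph_setxattr() above takes one of two paths: with CEPH_CAP_XATTR_EXCL
 * issued it updates the local rb-tree, marks the caps dirty and lets cap
 * writeback propagate the change; without the cap it falls back to
 * ceph_sync_setxattr(), i.e. a synchronous CEPH_MDS_OP_SETXATTR request.
 */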
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_path2 = kstrdup(name, GFP_NOFS);

	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	return err;
}
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int issued;
	int err;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	spin_lock(&ci->i_ceph_lock);
	__build_xattrs(inode);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_send_removexattr(dentry, name);
out:
	return err;
}