mm: do_migrate_pages() calls migrate_to_node() even if task is already on a correct...
[linux/fpc-iii.git] / fs / ceph / xattr.c
blob35b86331d8a5ce84c311e9eb2730757f80149179
1 #include <linux/ceph/ceph_debug.h>
3 #include "super.h"
4 #include "mds_client.h"
6 #include <linux/ceph/decode.h>
8 #include <linux/xattr.h>
9 #include <linux/slab.h>
/* Namespace prefix for ceph's virtual xattrs ("ceph.dir.*", "ceph.file.*") */
#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
14 static bool ceph_is_valid_xattr(const char *name)
16 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
17 !strncmp(name, XATTR_SECURITY_PREFIX,
18 XATTR_SECURITY_PREFIX_LEN) ||
19 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
20 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;		/* full xattr name, e.g. "ceph.dir.entries" */
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	/* formats the value into val; returns bytes needed (snprintf-like) */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly;		/* true: setxattr/removexattr are rejected */
};
/* directories */

/* ceph.dir.entries: files plus subdirs directly inside this directory */
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}
/* ceph.dir.files: number of files directly inside this directory */
static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				      size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}
/* ceph.dir.subdirs: number of subdirectories directly inside this directory */
static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}
/* ceph.dir.rentries: recursive file + subdir count for the whole subtree */
static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}
/* ceph.dir.rfiles: recursive file count for the whole subtree */
static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}
/* ceph.dir.rsubdirs: recursive subdirectory count for the whole subtree */
static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}
/* ceph.dir.rbytes: recursive byte total for the whole subtree */
static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}
79 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
80 size_t size)
82 return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec,
83 (long)ci->i_rctime.tv_nsec);
/* Expand to the string literal "ceph.<type>.<name>" */
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name

/*
 * Build one read-only ceph_vxattr table entry whose callback is
 * ceph_vxattrcb_<type>_<name>.
 */
#define XATTR_NAME_CEPH(_type, _name) \
	{ \
		.name = CEPH_XATTR_NAME(_type, _name), \
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
		.readonly = true, \
	}
/* Virtual xattrs exposed on directories; NULL-name entry terminates. */
static struct ceph_vxattr ceph_dir_vxattrs[] = {
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ 0 }	/* Required table terminator */
};

static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */
109 /* files */
111 static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
112 size_t size)
114 int ret;
116 ret = snprintf(val, size,
117 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
118 (unsigned long long)ceph_file_layout_su(ci->i_layout),
119 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
120 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
122 if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
123 val += ret;
124 size -= ret;
125 ret += snprintf(val, size, "preferred_osd=%lld\n",
126 (unsigned long long)ceph_file_layout_pg_preferred(
127 ci->i_layout));
130 return ret;
/* Virtual xattrs exposed on regular files; NULL-name entry terminates. */
static struct ceph_vxattr ceph_file_vxattrs[] = {
	XATTR_NAME_CEPH(file, layout),
	/* The following extended attribute name is deprecated */
	{
		/* legacy alias "ceph.layout" for ceph.file.layout */
		.name = XATTR_CEPH_PREFIX "layout",
		.name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
		.getxattr_cb = ceph_vxattrcb_file_layout,
		.readonly = true,
	},
	{ 0 }	/* Required table terminator */
};

static size_t ceph_file_vxattrs_name_size;	/* total size of all names */
146 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
148 if (S_ISDIR(inode->i_mode))
149 return ceph_dir_vxattrs;
150 else if (S_ISREG(inode->i_mode))
151 return ceph_file_vxattrs;
152 return NULL;
155 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
157 if (vxattrs == ceph_dir_vxattrs)
158 return ceph_dir_vxattrs_name_size;
159 if (vxattrs == ceph_file_vxattrs)
160 return ceph_file_vxattrs_name_size;
161 BUG();
163 return 0;
167 * Compute the aggregate size (including terminating '\0') of all
168 * virtual extended attribute names in the given vxattr table.
170 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
172 struct ceph_vxattr *vxattr;
173 size_t size = 0;
175 for (vxattr = vxattrs; vxattr->name; vxattr++)
176 size += vxattr->name_size;
178 return size;
/* Routines called at initialization and exit time */

/* Cache the aggregate vxattr name sizes so listxattr() need not rescan. */
void __init ceph_xattr_init(void)
{
	ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
	ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}
/* Reset the cached vxattr name sizes (counterpart of ceph_xattr_init). */
void ceph_xattr_exit(void)
{
	ceph_dir_vxattrs_name_size = 0;
	ceph_file_vxattrs_name_size = 0;
}
195 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
196 const char *name)
198 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
200 if (vxattr) {
201 while (vxattr->name) {
202 if (!strcmp(vxattr->name, name))
203 return vxattr;
204 vxattr++;
208 return NULL;
/*
 * Insert or update one xattr in the per-inode rb-tree index
 * (ci->i_xattrs.index), keyed by (name bytes, name length).
 *
 * @name/@name_len: attribute name (need not be NUL-terminated)
 * @val/@val_len:   attribute value; a NULL val is stored as ""
 * @dirty:          mark the entry as locally modified
 * @should_free_name/@should_free_val: whether the tree entry takes
 *                  ownership of the name/val allocations
 * @newxattr:       preallocated node; consumed on insert, or kfreed and
 *                  set to NULL when an existing entry is updated instead
 *
 * Caller must hold ci->i_ceph_lock.  Always returns 0.
 */
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		/* compare common prefix first, then break ties by length */
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;	/* exact match: update in place */
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		/* not a match; forget it so the loop exit means "insert" */
		xattr = NULL;
	}

	if (!xattr) {
		/* no existing entry: consume the preallocated node */
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		/* updating in place: the preallocated node is unused */
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (should_free_name) {
			/* keep the tree's existing name allocation */
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}
/*
 * Look up @name (NUL-terminated) in the inode's xattr rb-tree.
 * Returns the entry or NULL.  Caller must hold ci->i_ceph_lock.
 */
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		/*
		 * strncmp() only compared the stored name's length; if
		 * that prefix matched but the query is longer, the query
		 * sorts after this entry.
		 */
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}
321 static void __free_xattr(struct ceph_inode_xattr *xattr)
323 BUG_ON(!xattr);
325 if (xattr->should_free_name)
326 kfree((void *)xattr->name);
327 if (xattr->should_free_val)
328 kfree((void *)xattr->val);
330 kfree(xattr);
333 static int __remove_xattr(struct ceph_inode_info *ci,
334 struct ceph_inode_xattr *xattr)
336 if (!xattr)
337 return -EOPNOTSUPP;
339 rb_erase(&xattr->node, &ci->i_xattrs.index);
341 if (xattr->should_free_name)
342 kfree((void *)xattr->name);
343 if (xattr->should_free_val)
344 kfree((void *)xattr->val);
346 ci->i_xattrs.names_size -= xattr->name_len;
347 ci->i_xattrs.vals_size -= xattr->val_len;
348 ci->i_xattrs.count--;
349 kfree(xattr);
351 return 0;
/*
 * Remove the xattr named @name, if present.  Caller must hold
 * ci->i_ceph_lock.  Returns 0, or -EOPNOTSUPP when the name is absent.
 *
 * Cleanup: the old body initialized an rb_node pointer it never used
 * and funnelled the result through a needless temporary.
 */
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	return __remove_xattr(ci, __get_xattr(ci, name));
}
367 static char *__copy_xattr_names(struct ceph_inode_info *ci,
368 char *dest)
370 struct rb_node *p;
371 struct ceph_inode_xattr *xattr = NULL;
373 p = rb_first(&ci->i_xattrs.index);
374 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
376 while (p) {
377 xattr = rb_entry(p, struct ceph_inode_xattr, node);
378 memcpy(dest, xattr->name, xattr->name_len);
379 dest[xattr->name_len] = '\0';
381 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
382 xattr->name_len, ci->i_xattrs.names_size);
384 dest += xattr->name_len + 1;
385 p = rb_next(p);
388 return dest;
/*
 * Tear down the inode's entire xattr index: free every node and reset
 * all bookkeeping.  Caller must hold ci->i_ceph_lock or otherwise have
 * exclusive access (e.g. inode destruction).
 */
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		/* advance before rb_erase() detaches the current node */
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
418 static int __build_xattrs(struct inode *inode)
419 __releases(ci->i_ceph_lock)
420 __acquires(ci->i_ceph_lock)
422 u32 namelen;
423 u32 numattr = 0;
424 void *p, *end;
425 u32 len;
426 const char *name, *val;
427 struct ceph_inode_info *ci = ceph_inode(inode);
428 int xattr_version;
429 struct ceph_inode_xattr **xattrs = NULL;
430 int err = 0;
431 int i;
433 dout("__build_xattrs() len=%d\n",
434 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
436 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
437 return 0; /* already built */
439 __ceph_destroy_xattrs(ci);
441 start:
442 /* updated internal xattr rb tree */
443 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
444 p = ci->i_xattrs.blob->vec.iov_base;
445 end = p + ci->i_xattrs.blob->vec.iov_len;
446 ceph_decode_32_safe(&p, end, numattr, bad);
447 xattr_version = ci->i_xattrs.version;
448 spin_unlock(&ci->i_ceph_lock);
450 xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
451 GFP_NOFS);
452 err = -ENOMEM;
453 if (!xattrs)
454 goto bad_lock;
455 memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
456 for (i = 0; i < numattr; i++) {
457 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
458 GFP_NOFS);
459 if (!xattrs[i])
460 goto bad_lock;
463 spin_lock(&ci->i_ceph_lock);
464 if (ci->i_xattrs.version != xattr_version) {
465 /* lost a race, retry */
466 for (i = 0; i < numattr; i++)
467 kfree(xattrs[i]);
468 kfree(xattrs);
469 goto start;
471 err = -EIO;
472 while (numattr--) {
473 ceph_decode_32_safe(&p, end, len, bad);
474 namelen = len;
475 name = p;
476 p += len;
477 ceph_decode_32_safe(&p, end, len, bad);
478 val = p;
479 p += len;
481 err = __set_xattr(ci, name, namelen, val, len,
482 0, 0, 0, &xattrs[numattr]);
484 if (err < 0)
485 goto bad;
487 kfree(xattrs);
489 ci->i_xattrs.index_version = ci->i_xattrs.version;
490 ci->i_xattrs.dirty = false;
492 return err;
493 bad_lock:
494 spin_lock(&ci->i_ceph_lock);
495 bad:
496 if (xattrs) {
497 for (i = 0; i < numattr; i++)
498 kfree(xattrs[i]);
499 kfree(xattrs);
501 ci->i_xattrs.names_size = 0;
502 return err;
/*
 * Size in bytes the encoded xattr blob needs for the current set of
 * xattrs, optionally including one extra attribute of the given
 * name/value sizes (pass name_size == 0 for "no extra attribute").
 * Caller must hold ci->i_ceph_lock.
 */
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		ci->i_xattrs.names_size +
		ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 *
 * Caller must hold ci->i_ceph_lock and must already have sized
 * prealloc_blob via __get_required_blob_size() (the BUG_ON enforces
 * this).  On return the old blob reference is dropped, prealloc_blob
 * becomes the live blob, and the version is bumped.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		/* wire format: u32 count, then (u32 len, name, u32 len, val) */
		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
/*
 * getxattr entry point.  Fetches xattrs from the MDS if our cached
 * copy is stale, then serves either a virtual xattr (via its callback)
 * or a real one from the rb-tree index.
 *
 * Returns the value length on success, -ENODATA if absent, -ERANGE if
 * @size is nonzero but too small.  Per xattr convention, @size == 0
 * means "just report the required length" and @value may be NULL.
 */
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		/* cached index is current: serve locally */
		goto get_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	/* on this path, vxattrs are answered before building the index */
	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		/* a real xattr may shadow a vxattr; fall back to it here */
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;	/* length probe only */

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
/*
 * listxattr entry point: emit all real xattr names followed by the
 * inode's virtual xattr names, each NUL-terminated.  @size == 0 means
 * "just report the required length".
 *
 * NOTE(review): when the inode is neither a directory nor a regular
 * file, ceph_inode_vxattrs() returns NULL and ceph_vxattrs_name_size()
 * hits BUG() — verify such inodes (e.g. symlinks) cannot reach here.
 */
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		/* cached index is current */
		goto list_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	/*
	 * Start with virtual dir xattr names (if any) (including
	 * terminating '\0' characters for each).
	 */
	vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* adding 1 byte per each variable due to the null termination */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;	/* length probe only */

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
/*
 * Synchronous setxattr: copy the value into freshly allocated pages
 * and send a SETXATTR request to the (authoritative) MDS.  Used when
 * we do not hold the XATTR_EXCL cap and so cannot apply the change
 * locally.
 */
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int err;
	int i, nr_pages;
	struct page **pages = NULL;
	void *kaddr;

	/* copy value into some pages */
	nr_pages = calc_pages_for(0, size);
	if (nr_pages) {
		pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = -ENOMEM;
		for (i = 0; i < nr_pages; i++) {
			pages[i] = __page_cache_alloc(GFP_NOFS);
			if (!pages[i]) {
				/* free only the pages allocated so far */
				nr_pages = i;
				goto out;
			}
			/*
			 * NOTE(review): no matching kunmap() is visible
			 * for this kmap() — on highmem configs this
			 * looks like a kmap leak; confirm.
			 */
			kaddr = kmap(pages[i]);
			memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
			       min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
		}
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_args.setxattr.flags = cpu_to_le32(flags);
	/* NOTE(review): kstrdup() result is not checked for NULL — confirm
	 * the request path tolerates a NULL r_path2 on OOM. */
	req->r_path2 = kstrdup(name, GFP_NOFS);

	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_data_len = size;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pages) {
		for (i = 0; i < nr_pages; i++)
			__free_page(pages[i]);
		kfree(pages);
	}
	return err;
}
/*
 * setxattr entry point.  If we hold the XATTR_EXCL cap, the change is
 * applied locally (and the inode marked dirty); otherwise it is sent
 * synchronously to the MDS via ceph_sync_setxattr().
 *
 * Memory for the copied name, value and index node is preallocated
 * outside the spinlock; on the local-update success path ownership of
 * all three passes to the rb-tree (should_free_* == 1), and the
 * trailing kfree()s at "out" only run on failure/sync paths where
 * __set_xattr() was not reached (or NULLed the node pointer).
 */
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int dirty;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;	/* snapshots are read-only */

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		/* must allocate outside the spinlock, then recheck caps */
		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;	/* err is still -ENOMEM */
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
/*
 * Send a synchronous RMXATTR request to the authoritative MDS.  Used
 * when we do not hold the XATTR_EXCL cap.
 */
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	/* NOTE(review): kstrdup() result is not checked for NULL — confirm
	 * the request path tolerates a NULL r_path2 on OOM. */
	req->r_path2 = kstrdup(name, GFP_NOFS);

	parent_inode = ceph_get_dentry_parent_inode(dentry);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	iput(parent_inode);
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * removexattr entry point.  Mirrors ceph_setxattr(): if we hold the
 * XATTR_EXCL cap the removal is applied locally (the blob is resized
 * first so the dirty set can be reencoded later); otherwise it is sent
 * synchronously to the MDS via ceph_send_removexattr().
 */
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;	/* snapshots are read-only */

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	err = -ENOMEM;	/* returned if the blob allocation below fails */
	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		/* must allocate outside the spinlock, then recheck caps */
		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_send_removexattr(dentry, name);
out:
	return err;
}