// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */
static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);
/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}
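/*
 * Illustrative sketch (not from the original file): a ceph_vino pairs the
 * 64-bit MDS inode number with a snapshot id, and ceph_vino_to_ino() derives
 * the VFS ino_t used as the inode hash key, e.g.:
 *
 *	struct ceph_vino v = { .ino = 0x10000000abcULL, .snap = CEPH_NOSNAP };
 *	ino_t t = ceph_vino_to_ino(v);	// the raw ino on 64-bit kernels
 *
 * On 32-bit kernels ino_t is narrower than the ceph ino, so collisions are
 * possible; iget5_locked() therefore relies on ceph_ino_compare() rather
 * than the hash key alone to identify the inode.
 */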
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};
/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
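/*
 * Illustrative sketch (not from the original file): a frag id packs a bit
 * count and a value into one u32.  Splitting the root frag one bit gives
 * two children that partition the dentry hash space:
 *
 *	u32 root  = ceph_frag_make(0, 0);		// covers every hash
 *	u32 left  = ceph_frag_make_child(root, 1, 0);	// hashes, top bit 0
 *	u32 right = ceph_frag_make_child(root, 1, 1);	// hashes, top bit 1
 *
 *	ceph_frag_contains_value(left, hash);	// which half owns this hash?
 *
 * i_fragtree stores only split points (plus leaves carrying delegation
 * info), so walking from the root toward a leaf resolves any hash value.
 */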
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}
/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
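/*
 * Illustrative usage (not from the original file): callers map a dentry
 * name hash to the dirfrag that owns it, then route the request to the
 * delegated MDS if one was recorded:
 *
 *	struct ceph_inode_frag fi;
 *	int found;
 *	u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, name, len);
 *	u32 frag = ceph_choose_frag(ci, hash, &fi, &found);
 *	// if found != 0, fi.mds identifies the delegated MDS
 *
 * The locked wrapper is for callers outside this file; __ceph_choose_frag()
 * assumes i_fragtree_mutex is already held.
 */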
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split *)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split *)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}
static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}
static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}
void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

	call_rcu(&inode->i_rcu, ceph_i_callback);
}
int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply. So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}
static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}
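/*
 * Illustrative arithmetic (not from the original file): this rounds the
 * byte size up to 512-byte units, which is what stat(2) reports in
 * st_blocks:
 *
 *	calc_inode_blocks(0)   == 0
 *	calc_inode_blocks(1)   == 1	// a single byte still takes a block
 *	calc_inode_blocks(512) == 1
 *	calc_inode_blocks(513) == 2
 */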
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
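/*
 * Illustrative sketch (not from the original file): time_warp_seq is what
 * lets mtime move backwards safely. While the seq is unchanged the largest
 * timestamp wins, but a utimes() that sets an older mtime bumps the seq so
 * the explicit value wins instead:
 *
 *	seq 5, mtime 100; a write advances mtime to 120	(seq still 5)
 *	utimes() sets mtime 50 on the MDS		(seq becomes 6)
 *
 * A reply carrying (seq 6, mtime 50) then overwrites the cached
 * (seq 5, mtime 120) even though the timestamp went backwards.
 */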
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
				       "size %lld\n", ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time,
				struct ceph_vino *tgt_vino,
				struct ceph_vino *dir_vino)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;
	struct ceph_mds_session *old_lease_session = NULL;

	/*
	 * Make sure dentry's inode matches tgt_vino. NULL tgt_vino means that
	 * we expect a negative dentry.
	 */
	if (!tgt_vino && d_really_is_positive(dentry))
		return;

	if (tgt_vino && (d_really_is_negative(dentry) ||
			!ceph_ino_compare(d_inode(dentry), tgt_vino)))
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	dir = d_inode(dentry->d_parent);

	/* make sure parent matches dir_vino */
	if (!ceph_ino_compare(dir, dir_vino))
		goto out_unlock;

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		goto out_unlock;

	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, di->time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session) {
		old_lease_session = di->lease_session;
		di->lease_session = NULL;
	}

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	if (old_lease_session)
		ceph_put_mds_session(old_lease_session);
}
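/*
 * Illustrative arithmetic (not from the original file): the MDS grants
 * leases in milliseconds, converted here to jiffies relative to when the
 * request was started. With HZ == 250 and a 30 second lease:
 *
 *	duration = 30000;				// ms from the MDS
 *	ttl      = from_time + (30000 * 250) / 1000;	// +7500 jiffies
 *	half_ttl = from_time + (30000 * 250 / 2) / 1000;// +3750 jiffies
 *
 * lease_renew_after is set to half_ttl so the lease is refreshed well
 * before it actually expires.
 */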
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = req->r_session;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino tvino, dvino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_parent)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_parent;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (!dn) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != tvino.ino ||
				    ceph_snap(d_inode(dn)) != tvino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, tvino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				session, req->r_request_started,
				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
				 rinfo->head->result == 0) ? req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry &&
	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_parent;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);

		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);

		BUG_ON(ceph_ino(dir) != dvino.ino);
		BUG_ON(ceph_snap(dir) != dvino.snap);

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else if (have_lease) {
				if (d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started,
						    NULL, &dvino);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease) {
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started,
					    &tvino, &dvino);
		}
		dout(" final dn %p\n", dn);
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_parent;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	} else if (rinfo->head->is_dentry) {
		struct ceph_vino *ptvino = NULL;

		if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
		    le32_to_cpu(rinfo->dlease->duration_ms)) {
			dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
			dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);

			if (rinfo->head->is_target) {
				tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
				tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
				ptvino = &tvino;
			}

			update_dentry_lease(req->r_dentry, rinfo->dlease,
					    session, req->r_request_started,
					    ptvino, &dvino);
		} else {
			dout("%s: no dentry lease or dir cap\n", __func__);
		}
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}
void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}
static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}
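/*
 * Illustrative arithmetic (not from the original file): the readdir cache
 * stores raw dentry pointers in the directory's page cache. With 4 KiB
 * pages and 8-byte pointers:
 *
 *	nsize = 4096 / 8 = 512 dentries per page
 *	index 1000 -> pgoff 1 (1000 / 512), slot 488 (1000 % 512)
 *
 * idx == 0 means a fresh page is started, so it is allocated with
 * grab_cache_page() and zeroed; otherwise an existing page is looked up
 * with find_lock_page() and filled in place.
 */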
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order) {
		if (req->r_path2) {
			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						  req->r_path2,
						  strlen(req->r_path2));
			last_hash = ceph_frag_value(last_hash);
		} else if (rinfo->offset_hash) {
			/* mds understands offset_hash */
			WARN_ON_ONCE(req->r_readdir_offset != 2);
			last_hash = le32_to_cpu(rhead->args.readdir.offset_hash);
		}
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);

		if (ceph_frag_is_leftmost(frag) &&
		    req->r_readdir_offset == 2 &&
		    !(rinfo->hash_order && last_hash)) {
			/* note dir version at start of readdir so we can
			 * tell if any dentries get dropped */
			req->r_dir_release_cnt =
				atomic64_read(&ci->i_release_count);
			req->r_dir_ordered_cnt =
				atomic64_read(&ci->i_ordered_count);
			req->r_readdir_cache_idx = 0;
		}
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino tvino, dvino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		tvino.ino = le64_to_cpu(rde->inode.in->ino);
		tvino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (!dn) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != tvino.ino ||
			    ceph_snap(d_inode(dn)) != tvino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			__ceph_dir_clear_ordered(ci);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, tvino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		dvino = ceph_vino(d_inode(parent));
		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started, &tvino, &dvino);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	dout("readdir_prepopulate done\n");
	return err;
}
bool ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool ret;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	ret = __ceph_should_report_size(ci);

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}
/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err("invalidate_pages %p fails\n", inode);
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}
/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}
/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		struct ceph_cap_snap *capsnap;
		to = ci->i_truncate_size;
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			// MDS should have revoked Frw caps
			WARN_ON_ONCE(capsnap->writing);
			if (capsnap->dirty_pages && capsnap->size > to)
				to = capsnap->size;
		}
		spin_unlock(&ci->i_ceph_lock);
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);

		truncate_pagecache(inode, to);

		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}
/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
};
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = attr->ia_ctime;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		req->r_stamp = attr->ia_ctime;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);

	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
		__ceph_do_pending_vmtruncate(inode);

	return err;
}
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = setattr_prepare(dentry, attr);
	if (err != 0)
		return err;

	err = __ceph_setattr(inode, attr);

	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
		err = posix_acl_chmod(inode, attr->ia_mode);

	return err;
}
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}
/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}