1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
4 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
20 #include "mds_client.h"
23 #include <linux/ceph/decode.h>
26 * Ceph inode operations
28 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29 * setattr, etc.), xattr helpers, and helpers for assimilating
30 * metadata returned by the MDS into our cache.
32 * Also define helpers for doing asynchronous writeback, invalidation,
33 * and truncation for the benefit of those who can't afford to block
34 * (typically because they are in the message handler path).
37 static const struct inode_operations ceph_symlink_iops
;
38 static const struct inode_operations ceph_encrypted_symlink_iops
;
40 static void ceph_inode_work(struct work_struct
*work
);
43 * find or create an inode, given the ceph ino number
45 static int ceph_set_ino_cb(struct inode
*inode
, void *data
)
47 struct ceph_inode_info
*ci
= ceph_inode(inode
);
48 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(inode
->i_sb
);
50 ci
->i_vino
= *(struct ceph_vino
*)data
;
51 inode
->i_ino
= ceph_vino_to_ino_t(ci
->i_vino
);
52 inode_set_iversion_raw(inode
, 0);
53 percpu_counter_inc(&mdsc
->metric
.total_inodes
);
59 * ceph_new_inode - allocate a new inode in advance of an expected create
60 * @dir: parent directory for new inode
61 * @dentry: dentry that may eventually point to new inode
62 * @mode: mode of new inode
63 * @as_ctx: pointer to inherited security context
65 * Allocate a new inode in advance of an operation to create a new inode.
66 * This allocates the inode and sets up the acl_sec_ctx with appropriate
67 * info for the new inode.
69 * Returns a pointer to the new inode or an ERR_PTR.
71 struct inode
*ceph_new_inode(struct inode
*dir
, struct dentry
*dentry
,
72 umode_t
*mode
, struct ceph_acl_sec_ctx
*as_ctx
)
77 inode
= new_inode(dir
->i_sb
);
79 return ERR_PTR(-ENOMEM
);
81 inode
->i_blkbits
= CEPH_FSCRYPT_BLOCK_SHIFT
;
83 if (!S_ISLNK(*mode
)) {
84 err
= ceph_pre_init_acls(dir
, mode
, as_ctx
);
90 inode
->i_mode
= *mode
;
92 err
= ceph_security_init_secctx(dentry
, *mode
, as_ctx
);
97 * We'll skip setting fscrypt context for snapshots, leaving that for
100 if (ceph_snap(dir
) != CEPH_SNAPDIR
) {
101 err
= ceph_fscrypt_prepare_context(dir
, inode
, as_ctx
);
112 void ceph_as_ctx_to_req(struct ceph_mds_request
*req
,
113 struct ceph_acl_sec_ctx
*as_ctx
)
115 if (as_ctx
->pagelist
) {
116 req
->r_pagelist
= as_ctx
->pagelist
;
117 as_ctx
->pagelist
= NULL
;
119 ceph_fscrypt_as_ctx_to_req(req
, as_ctx
);
123 * ceph_get_inode - find or create/hash a new inode
124 * @sb: superblock to search and allocate in
125 * @vino: vino to search for
126 * @newino: optional new inode to insert if one isn't found (may be NULL)
128 * Search for or insert a new inode into the hash for the given vino, and
129 * return a reference to it. If new is non-NULL, its reference is consumed.
131 struct inode
*ceph_get_inode(struct super_block
*sb
, struct ceph_vino vino
,
132 struct inode
*newino
)
134 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(sb
);
135 struct ceph_client
*cl
= mdsc
->fsc
->client
;
138 if (ceph_vino_is_reserved(vino
))
139 return ERR_PTR(-EREMOTEIO
);
142 inode
= inode_insert5(newino
, (unsigned long)vino
.ino
,
143 ceph_ino_compare
, ceph_set_ino_cb
, &vino
);
147 inode
= iget5_locked(sb
, (unsigned long)vino
.ino
,
148 ceph_ino_compare
, ceph_set_ino_cb
, &vino
);
152 doutc(cl
, "no inode found for %llx.%llx\n", vino
.ino
, vino
.snap
);
153 return ERR_PTR(-ENOMEM
);
156 doutc(cl
, "on %llx=%llx.%llx got %p new %d\n",
157 ceph_present_inode(inode
), ceph_vinop(inode
), inode
,
158 !!(inode
->i_state
& I_NEW
));
163 * get/construct snapdir inode for a given directory
165 struct inode
*ceph_get_snapdir(struct inode
*parent
)
167 struct ceph_client
*cl
= ceph_inode_to_client(parent
);
168 struct ceph_vino vino
= {
169 .ino
= ceph_ino(parent
),
170 .snap
= CEPH_SNAPDIR
,
172 struct inode
*inode
= ceph_get_inode(parent
->i_sb
, vino
, NULL
);
173 struct ceph_inode_info
*ci
= ceph_inode(inode
);
179 if (!S_ISDIR(parent
->i_mode
)) {
180 pr_warn_once_client(cl
, "bad snapdir parent type (mode=0%o)\n",
185 if (!(inode
->i_state
& I_NEW
) && !S_ISDIR(inode
->i_mode
)) {
186 pr_warn_once_client(cl
, "bad snapdir inode type (mode=0%o)\n",
191 inode
->i_mode
= parent
->i_mode
;
192 inode
->i_uid
= parent
->i_uid
;
193 inode
->i_gid
= parent
->i_gid
;
194 inode_set_mtime_to_ts(inode
, inode_get_mtime(parent
));
195 inode_set_ctime_to_ts(inode
, inode_get_ctime(parent
));
196 inode_set_atime_to_ts(inode
, inode_get_atime(parent
));
198 ci
->i_btime
= ceph_inode(parent
)->i_btime
;
200 #ifdef CONFIG_FS_ENCRYPTION
201 /* if encrypted, just borrow fscrypt_auth from parent */
202 if (IS_ENCRYPTED(parent
)) {
203 struct ceph_inode_info
*pci
= ceph_inode(parent
);
205 ci
->fscrypt_auth
= kmemdup(pci
->fscrypt_auth
,
206 pci
->fscrypt_auth_len
,
208 if (ci
->fscrypt_auth
) {
209 inode
->i_flags
|= S_ENCRYPTED
;
210 ci
->fscrypt_auth_len
= pci
->fscrypt_auth_len
;
212 doutc(cl
, "Failed to alloc snapdir fscrypt_auth\n");
218 if (inode
->i_state
& I_NEW
) {
219 inode
->i_op
= &ceph_snapdir_iops
;
220 inode
->i_fop
= &ceph_snapdir_fops
;
221 ci
->i_snap_caps
= CEPH_CAP_PIN
; /* so we can open */
222 unlock_new_inode(inode
);
227 if ((inode
->i_state
& I_NEW
))
228 discard_new_inode(inode
);
234 const struct inode_operations ceph_file_iops
= {
235 .permission
= ceph_permission
,
236 .setattr
= ceph_setattr
,
237 .getattr
= ceph_getattr
,
238 .listxattr
= ceph_listxattr
,
239 .get_inode_acl
= ceph_get_acl
,
240 .set_acl
= ceph_set_acl
,
245 * We use a 'frag tree' to keep track of the MDS's directory fragments
246 * for a given inode (usually there is just a single fragment). We
247 * need to know when a child frag is delegated to a new MDS, or when
248 * it is flagged as replicated, so we can direct our requests
253 * find/create a frag in the tree
255 static struct ceph_inode_frag
*__get_or_create_frag(struct ceph_inode_info
*ci
,
258 struct inode
*inode
= &ci
->netfs
.inode
;
259 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
261 struct rb_node
*parent
= NULL
;
262 struct ceph_inode_frag
*frag
;
265 p
= &ci
->i_fragtree
.rb_node
;
268 frag
= rb_entry(parent
, struct ceph_inode_frag
, node
);
269 c
= ceph_frag_compare(f
, frag
->frag
);
278 frag
= kmalloc(sizeof(*frag
), GFP_NOFS
);
280 return ERR_PTR(-ENOMEM
);
287 rb_link_node(&frag
->node
, parent
, p
);
288 rb_insert_color(&frag
->node
, &ci
->i_fragtree
);
290 doutc(cl
, "added %p %llx.%llx frag %x\n", inode
, ceph_vinop(inode
), f
);
295 * find a specific frag @f
297 struct ceph_inode_frag
*__ceph_find_frag(struct ceph_inode_info
*ci
, u32 f
)
299 struct rb_node
*n
= ci
->i_fragtree
.rb_node
;
302 struct ceph_inode_frag
*frag
=
303 rb_entry(n
, struct ceph_inode_frag
, node
);
304 int c
= ceph_frag_compare(f
, frag
->frag
);
316 * Choose frag containing the given value @v. If @pfrag is
317 * specified, copy the frag delegation info to the caller if
320 static u32
__ceph_choose_frag(struct ceph_inode_info
*ci
, u32 v
,
321 struct ceph_inode_frag
*pfrag
, int *found
)
323 struct ceph_client
*cl
= ceph_inode_to_client(&ci
->netfs
.inode
);
324 u32 t
= ceph_frag_make(0, 0);
325 struct ceph_inode_frag
*frag
;
333 WARN_ON(!ceph_frag_contains_value(t
, v
));
334 frag
= __ceph_find_frag(ci
, t
);
336 break; /* t is a leaf */
337 if (frag
->split_by
== 0) {
339 memcpy(pfrag
, frag
, sizeof(*pfrag
));
346 nway
= 1 << frag
->split_by
;
347 doutc(cl
, "frag(%x) %x splits by %d (%d ways)\n", v
, t
,
348 frag
->split_by
, nway
);
349 for (i
= 0; i
< nway
; i
++) {
350 n
= ceph_frag_make_child(t
, frag
->split_by
, i
);
351 if (ceph_frag_contains_value(n
, v
)) {
358 doutc(cl
, "frag(%x) = %x\n", v
, t
);
363 u32
ceph_choose_frag(struct ceph_inode_info
*ci
, u32 v
,
364 struct ceph_inode_frag
*pfrag
, int *found
)
367 mutex_lock(&ci
->i_fragtree_mutex
);
368 ret
= __ceph_choose_frag(ci
, v
, pfrag
, found
);
369 mutex_unlock(&ci
->i_fragtree_mutex
);
374 * Process dirfrag (delegation) info from the mds. Include leaf
375 * fragment in tree ONLY if ndist > 0. Otherwise, only
376 * branches/splits are included in i_fragtree)
378 static int ceph_fill_dirfrag(struct inode
*inode
,
379 struct ceph_mds_reply_dirfrag
*dirinfo
)
381 struct ceph_inode_info
*ci
= ceph_inode(inode
);
382 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
383 struct ceph_inode_frag
*frag
;
384 u32 id
= le32_to_cpu(dirinfo
->frag
);
385 int mds
= le32_to_cpu(dirinfo
->auth
);
386 int ndist
= le32_to_cpu(dirinfo
->ndist
);
391 spin_lock(&ci
->i_ceph_lock
);
393 diri_auth
= ci
->i_auth_cap
->mds
;
394 spin_unlock(&ci
->i_ceph_lock
);
396 if (mds
== -1) /* CDIR_AUTH_PARENT */
399 mutex_lock(&ci
->i_fragtree_mutex
);
400 if (ndist
== 0 && mds
== diri_auth
) {
401 /* no delegation info needed. */
402 frag
= __ceph_find_frag(ci
, id
);
405 if (frag
->split_by
== 0) {
406 /* tree leaf, remove */
407 doutc(cl
, "removed %p %llx.%llx frag %x (no ref)\n",
408 inode
, ceph_vinop(inode
), id
);
409 rb_erase(&frag
->node
, &ci
->i_fragtree
);
412 /* tree branch, keep and clear */
413 doutc(cl
, "cleared %p %llx.%llx frag %x referral\n",
414 inode
, ceph_vinop(inode
), id
);
422 /* find/add this frag to store mds delegation info */
423 frag
= __get_or_create_frag(ci
, id
);
425 /* this is not the end of the world; we can continue
426 with bad/inaccurate delegation info */
427 pr_err_client(cl
, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
428 inode
, ceph_vinop(inode
),
429 le32_to_cpu(dirinfo
->frag
));
435 frag
->ndist
= min_t(u32
, ndist
, CEPH_MAX_DIRFRAG_REP
);
436 for (i
= 0; i
< frag
->ndist
; i
++)
437 frag
->dist
[i
] = le32_to_cpu(dirinfo
->dist
[i
]);
438 doutc(cl
, "%p %llx.%llx frag %x ndist=%d\n", inode
,
439 ceph_vinop(inode
), frag
->frag
, frag
->ndist
);
442 mutex_unlock(&ci
->i_fragtree_mutex
);
446 static int frag_tree_split_cmp(const void *l
, const void *r
)
448 struct ceph_frag_tree_split
*ls
= (struct ceph_frag_tree_split
*)l
;
449 struct ceph_frag_tree_split
*rs
= (struct ceph_frag_tree_split
*)r
;
450 return ceph_frag_compare(le32_to_cpu(ls
->frag
),
451 le32_to_cpu(rs
->frag
));
454 static bool is_frag_child(u32 f
, struct ceph_inode_frag
*frag
)
457 return f
== ceph_frag_make(0, 0);
458 if (ceph_frag_bits(f
) != ceph_frag_bits(frag
->frag
) + frag
->split_by
)
460 return ceph_frag_contains_value(frag
->frag
, ceph_frag_value(f
));
463 static int ceph_fill_fragtree(struct inode
*inode
,
464 struct ceph_frag_tree_head
*fragtree
,
465 struct ceph_mds_reply_dirfrag
*dirinfo
)
467 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
468 struct ceph_inode_info
*ci
= ceph_inode(inode
);
469 struct ceph_inode_frag
*frag
, *prev_frag
= NULL
;
470 struct rb_node
*rb_node
;
471 unsigned i
, split_by
, nsplits
;
475 mutex_lock(&ci
->i_fragtree_mutex
);
476 nsplits
= le32_to_cpu(fragtree
->nsplits
);
477 if (nsplits
!= ci
->i_fragtree_nsplits
) {
479 } else if (nsplits
) {
480 i
= get_random_u32_below(nsplits
);
481 id
= le32_to_cpu(fragtree
->splits
[i
].frag
);
482 if (!__ceph_find_frag(ci
, id
))
484 } else if (!RB_EMPTY_ROOT(&ci
->i_fragtree
)) {
485 rb_node
= rb_first(&ci
->i_fragtree
);
486 frag
= rb_entry(rb_node
, struct ceph_inode_frag
, node
);
487 if (frag
->frag
!= ceph_frag_make(0, 0) || rb_next(rb_node
))
490 if (!update
&& dirinfo
) {
491 id
= le32_to_cpu(dirinfo
->frag
);
492 if (id
!= __ceph_choose_frag(ci
, id
, NULL
, NULL
))
499 sort(fragtree
->splits
, nsplits
, sizeof(fragtree
->splits
[0]),
500 frag_tree_split_cmp
, NULL
);
503 doutc(cl
, "%p %llx.%llx\n", inode
, ceph_vinop(inode
));
504 rb_node
= rb_first(&ci
->i_fragtree
);
505 for (i
= 0; i
< nsplits
; i
++) {
506 id
= le32_to_cpu(fragtree
->splits
[i
].frag
);
507 split_by
= le32_to_cpu(fragtree
->splits
[i
].by
);
508 if (split_by
== 0 || ceph_frag_bits(id
) + split_by
> 24) {
509 pr_err_client(cl
, "%p %llx.%llx invalid split %d/%u, "
510 "frag %x split by %d\n", inode
,
511 ceph_vinop(inode
), i
, nsplits
, id
, split_by
);
516 frag
= rb_entry(rb_node
, struct ceph_inode_frag
, node
);
517 if (ceph_frag_compare(frag
->frag
, id
) >= 0) {
518 if (frag
->frag
!= id
)
521 rb_node
= rb_next(rb_node
);
524 rb_node
= rb_next(rb_node
);
525 /* delete stale split/leaf node */
526 if (frag
->split_by
> 0 ||
527 !is_frag_child(frag
->frag
, prev_frag
)) {
528 rb_erase(&frag
->node
, &ci
->i_fragtree
);
529 if (frag
->split_by
> 0)
530 ci
->i_fragtree_nsplits
--;
536 frag
= __get_or_create_frag(ci
, id
);
540 if (frag
->split_by
== 0)
541 ci
->i_fragtree_nsplits
++;
542 frag
->split_by
= split_by
;
543 doutc(cl
, " frag %x split by %d\n", frag
->frag
, frag
->split_by
);
547 frag
= rb_entry(rb_node
, struct ceph_inode_frag
, node
);
548 rb_node
= rb_next(rb_node
);
549 /* delete stale split/leaf node */
550 if (frag
->split_by
> 0 ||
551 !is_frag_child(frag
->frag
, prev_frag
)) {
552 rb_erase(&frag
->node
, &ci
->i_fragtree
);
553 if (frag
->split_by
> 0)
554 ci
->i_fragtree_nsplits
--;
559 mutex_unlock(&ci
->i_fragtree_mutex
);
564 * initialize a newly allocated inode.
566 struct inode
*ceph_alloc_inode(struct super_block
*sb
)
568 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(sb
);
569 struct ceph_inode_info
*ci
;
572 ci
= alloc_inode_sb(sb
, ceph_inode_cachep
, GFP_NOFS
);
576 doutc(fsc
->client
, "%p\n", &ci
->netfs
.inode
);
578 /* Set parameters for the netfs library */
579 netfs_inode_init(&ci
->netfs
, &ceph_netfs_ops
, false);
581 spin_lock_init(&ci
->i_ceph_lock
);
584 ci
->i_inline_version
= 0;
585 ci
->i_time_warp_seq
= 0;
586 ci
->i_ceph_flags
= 0;
587 atomic64_set(&ci
->i_ordered_count
, 1);
588 atomic64_set(&ci
->i_release_count
, 1);
589 atomic64_set(&ci
->i_complete_seq
[0], 0);
590 atomic64_set(&ci
->i_complete_seq
[1], 0);
591 ci
->i_symlink
= NULL
;
596 memset(&ci
->i_dir_layout
, 0, sizeof(ci
->i_dir_layout
));
597 memset(&ci
->i_cached_layout
, 0, sizeof(ci
->i_cached_layout
));
598 RCU_INIT_POINTER(ci
->i_layout
.pool_ns
, NULL
);
600 ci
->i_fragtree
= RB_ROOT
;
601 mutex_init(&ci
->i_fragtree_mutex
);
603 ci
->i_xattrs
.blob
= NULL
;
604 ci
->i_xattrs
.prealloc_blob
= NULL
;
605 ci
->i_xattrs
.dirty
= false;
606 ci
->i_xattrs
.index
= RB_ROOT
;
607 ci
->i_xattrs
.count
= 0;
608 ci
->i_xattrs
.names_size
= 0;
609 ci
->i_xattrs
.vals_size
= 0;
610 ci
->i_xattrs
.version
= 0;
611 ci
->i_xattrs
.index_version
= 0;
613 ci
->i_caps
= RB_ROOT
;
614 ci
->i_auth_cap
= NULL
;
615 ci
->i_dirty_caps
= 0;
616 ci
->i_flushing_caps
= 0;
617 INIT_LIST_HEAD(&ci
->i_dirty_item
);
618 INIT_LIST_HEAD(&ci
->i_flushing_item
);
619 ci
->i_prealloc_cap_flush
= NULL
;
620 INIT_LIST_HEAD(&ci
->i_cap_flush_list
);
621 init_waitqueue_head(&ci
->i_cap_wq
);
622 ci
->i_hold_caps_max
= 0;
623 INIT_LIST_HEAD(&ci
->i_cap_delay_list
);
624 INIT_LIST_HEAD(&ci
->i_cap_snaps
);
625 ci
->i_head_snapc
= NULL
;
628 ci
->i_last_rd
= ci
->i_last_wr
= jiffies
- 3600 * HZ
;
629 for (i
= 0; i
< CEPH_FILE_MODE_BITS
; i
++)
630 ci
->i_nr_by_mode
[i
] = 0;
632 mutex_init(&ci
->i_truncate_mutex
);
633 ci
->i_truncate_seq
= 0;
634 ci
->i_truncate_size
= 0;
635 ci
->i_truncate_pending
= 0;
636 ci
->i_truncate_pagecache_size
= 0;
639 ci
->i_reported_size
= 0;
640 ci
->i_wanted_max_size
= 0;
641 ci
->i_requested_max_size
= 0;
645 ci
->i_rdcache_ref
= 0;
649 ci
->i_wrbuffer_ref
= 0;
650 ci
->i_wrbuffer_ref_head
= 0;
651 atomic_set(&ci
->i_filelock_ref
, 0);
652 atomic_set(&ci
->i_shared_gen
, 1);
653 ci
->i_rdcache_gen
= 0;
654 ci
->i_rdcache_revoking
= 0;
656 INIT_LIST_HEAD(&ci
->i_unsafe_dirops
);
657 INIT_LIST_HEAD(&ci
->i_unsafe_iops
);
658 spin_lock_init(&ci
->i_unsafe_lock
);
660 ci
->i_snap_realm
= NULL
;
661 INIT_LIST_HEAD(&ci
->i_snap_realm_item
);
662 INIT_LIST_HEAD(&ci
->i_snap_flush_item
);
664 INIT_WORK(&ci
->i_work
, ceph_inode_work
);
666 memset(&ci
->i_btime
, '\0', sizeof(ci
->i_btime
));
667 #ifdef CONFIG_FS_ENCRYPTION
668 ci
->fscrypt_auth
= NULL
;
669 ci
->fscrypt_auth_len
= 0;
671 return &ci
->netfs
.inode
;
674 void ceph_free_inode(struct inode
*inode
)
676 struct ceph_inode_info
*ci
= ceph_inode(inode
);
678 kfree(ci
->i_symlink
);
679 #ifdef CONFIG_FS_ENCRYPTION
680 kfree(ci
->fscrypt_auth
);
682 fscrypt_free_inode(inode
);
683 kmem_cache_free(ceph_inode_cachep
, ci
);
686 void ceph_evict_inode(struct inode
*inode
)
688 struct ceph_inode_info
*ci
= ceph_inode(inode
);
689 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(inode
->i_sb
);
690 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
691 struct ceph_inode_frag
*frag
;
694 doutc(cl
, "%p ino %llx.%llx\n", inode
, ceph_vinop(inode
));
696 percpu_counter_dec(&mdsc
->metric
.total_inodes
);
698 netfs_wait_for_outstanding_io(inode
);
699 truncate_inode_pages_final(&inode
->i_data
);
700 if (inode
->i_state
& I_PINNING_NETFS_WB
)
701 ceph_fscache_unuse_cookie(inode
, true);
704 ceph_fscache_unregister_inode_cookie(ci
);
705 fscrypt_put_encryption_info(inode
);
707 __ceph_remove_caps(ci
);
709 if (__ceph_has_quota(ci
, QUOTA_GET_ANY
))
710 ceph_adjust_quota_realms_count(inode
, false);
713 * we may still have a snap_realm reference if there are stray
714 * caps in i_snap_caps.
716 if (ci
->i_snap_realm
) {
717 if (ceph_snap(inode
) == CEPH_NOSNAP
) {
718 doutc(cl
, " dropping residual ref to snap realm %p\n",
720 ceph_change_snap_realm(inode
, NULL
);
722 ceph_put_snapid_map(mdsc
, ci
->i_snapid_map
);
723 ci
->i_snap_realm
= NULL
;
727 while ((n
= rb_first(&ci
->i_fragtree
)) != NULL
) {
728 frag
= rb_entry(n
, struct ceph_inode_frag
, node
);
729 rb_erase(n
, &ci
->i_fragtree
);
732 ci
->i_fragtree_nsplits
= 0;
734 __ceph_destroy_xattrs(ci
);
735 if (ci
->i_xattrs
.blob
)
736 ceph_buffer_put(ci
->i_xattrs
.blob
);
737 if (ci
->i_xattrs
.prealloc_blob
)
738 ceph_buffer_put(ci
->i_xattrs
.prealloc_blob
);
740 ceph_put_string(rcu_dereference_raw(ci
->i_layout
.pool_ns
));
741 ceph_put_string(rcu_dereference_raw(ci
->i_cached_layout
.pool_ns
));
744 static inline blkcnt_t
calc_inode_blocks(u64 size
)
746 return (size
+ (1<<9) - 1) >> 9;
750 * Helpers to fill in size, ctime, mtime, and atime. We have to be
751 * careful because either the client or MDS may have more up to date
752 * info, depending on which capabilities are held, and whether
753 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
754 * and size are monotonically increasing, except when utimes() or
755 * truncate() increments the corresponding _seq values.)
757 int ceph_fill_file_size(struct inode
*inode
, int issued
,
758 u32 truncate_seq
, u64 truncate_size
, u64 size
)
760 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
761 struct ceph_inode_info
*ci
= ceph_inode(inode
);
763 loff_t isize
= i_size_read(inode
);
765 if (ceph_seq_cmp(truncate_seq
, ci
->i_truncate_seq
) > 0 ||
766 (truncate_seq
== ci
->i_truncate_seq
&& size
> isize
)) {
767 doutc(cl
, "size %lld -> %llu\n", isize
, size
);
768 if (size
> 0 && S_ISDIR(inode
->i_mode
)) {
769 pr_err_client(cl
, "non-zero size for directory\n");
772 i_size_write(inode
, size
);
773 inode
->i_blocks
= calc_inode_blocks(size
);
775 * If we're expanding, then we should be able to just update
776 * the existing cookie.
779 ceph_fscache_update(inode
);
780 ci
->i_reported_size
= size
;
781 if (truncate_seq
!= ci
->i_truncate_seq
) {
782 doutc(cl
, "truncate_seq %u -> %u\n",
783 ci
->i_truncate_seq
, truncate_seq
);
784 ci
->i_truncate_seq
= truncate_seq
;
786 /* the MDS should have revoked these caps */
787 WARN_ON_ONCE(issued
& (CEPH_CAP_FILE_RD
|
788 CEPH_CAP_FILE_LAZYIO
));
790 * If we hold relevant caps, or in the case where we're
791 * not the only client referencing this file and we
792 * don't hold those caps, then we need to check whether
793 * the file is either opened or mmaped
795 if ((issued
& (CEPH_CAP_FILE_CACHE
|
796 CEPH_CAP_FILE_BUFFER
)) ||
797 mapping_mapped(inode
->i_mapping
) ||
798 __ceph_is_file_opened(ci
)) {
799 ci
->i_truncate_pending
++;
806 * It's possible that the new sizes of the two consecutive
807 * size truncations will be in the same fscrypt last block,
808 * and we need to truncate the corresponding page caches
811 if (ceph_seq_cmp(truncate_seq
, ci
->i_truncate_seq
) >= 0) {
812 doutc(cl
, "truncate_size %lld -> %llu, encrypted %d\n",
813 ci
->i_truncate_size
, truncate_size
,
814 !!IS_ENCRYPTED(inode
));
816 ci
->i_truncate_size
= truncate_size
;
818 if (IS_ENCRYPTED(inode
)) {
819 doutc(cl
, "truncate_pagecache_size %lld -> %llu\n",
820 ci
->i_truncate_pagecache_size
, size
);
821 ci
->i_truncate_pagecache_size
= size
;
823 ci
->i_truncate_pagecache_size
= truncate_size
;
829 void ceph_fill_file_time(struct inode
*inode
, int issued
,
830 u64 time_warp_seq
, struct timespec64
*ctime
,
831 struct timespec64
*mtime
, struct timespec64
*atime
)
833 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
834 struct ceph_inode_info
*ci
= ceph_inode(inode
);
835 struct timespec64 ictime
= inode_get_ctime(inode
);
838 if (issued
& (CEPH_CAP_FILE_EXCL
|
840 CEPH_CAP_FILE_BUFFER
|
842 CEPH_CAP_XATTR_EXCL
)) {
843 if (ci
->i_version
== 0 ||
844 timespec64_compare(ctime
, &ictime
) > 0) {
845 doutc(cl
, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
846 ictime
.tv_sec
, ictime
.tv_nsec
,
847 ctime
->tv_sec
, ctime
->tv_nsec
);
848 inode_set_ctime_to_ts(inode
, *ctime
);
850 if (ci
->i_version
== 0 ||
851 ceph_seq_cmp(time_warp_seq
, ci
->i_time_warp_seq
) > 0) {
852 /* the MDS did a utimes() */
853 doutc(cl
, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
854 inode_get_mtime_sec(inode
),
855 inode_get_mtime_nsec(inode
),
856 mtime
->tv_sec
, mtime
->tv_nsec
,
857 ci
->i_time_warp_seq
, (int)time_warp_seq
);
859 inode_set_mtime_to_ts(inode
, *mtime
);
860 inode_set_atime_to_ts(inode
, *atime
);
861 ci
->i_time_warp_seq
= time_warp_seq
;
862 } else if (time_warp_seq
== ci
->i_time_warp_seq
) {
863 struct timespec64 ts
;
865 /* nobody did utimes(); take the max */
866 ts
= inode_get_mtime(inode
);
867 if (timespec64_compare(mtime
, &ts
) > 0) {
868 doutc(cl
, "mtime %lld.%09ld -> %lld.%09ld inc\n",
869 ts
.tv_sec
, ts
.tv_nsec
,
870 mtime
->tv_sec
, mtime
->tv_nsec
);
871 inode_set_mtime_to_ts(inode
, *mtime
);
873 ts
= inode_get_atime(inode
);
874 if (timespec64_compare(atime
, &ts
) > 0) {
875 doutc(cl
, "atime %lld.%09ld -> %lld.%09ld inc\n",
876 ts
.tv_sec
, ts
.tv_nsec
,
877 atime
->tv_sec
, atime
->tv_nsec
);
878 inode_set_atime_to_ts(inode
, *atime
);
880 } else if (issued
& CEPH_CAP_FILE_EXCL
) {
881 /* we did a utimes(); ignore mds values */
886 /* we have no write|excl caps; whatever the MDS says is true */
887 if (ceph_seq_cmp(time_warp_seq
, ci
->i_time_warp_seq
) >= 0) {
888 inode_set_ctime_to_ts(inode
, *ctime
);
889 inode_set_mtime_to_ts(inode
, *mtime
);
890 inode_set_atime_to_ts(inode
, *atime
);
891 ci
->i_time_warp_seq
= time_warp_seq
;
896 if (warn
) /* time_warp_seq shouldn't go backwards */
897 doutc(cl
, "%p mds time_warp_seq %llu < %u\n", inode
,
898 time_warp_seq
, ci
->i_time_warp_seq
);
901 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
902 static int decode_encrypted_symlink(struct ceph_mds_client
*mdsc
,
904 int enclen
, u8
**decsym
)
906 struct ceph_client
*cl
= mdsc
->fsc
->client
;
910 sym
= kmalloc(enclen
+ 1, GFP_NOFS
);
914 declen
= ceph_base64_decode(encsym
, enclen
, sym
);
917 "can't decode symlink (%d). Content: %.*s\n",
918 declen
, enclen
, encsym
);
922 sym
[declen
+ 1] = '\0';
927 static int decode_encrypted_symlink(struct ceph_mds_client
*mdsc
,
929 int symlen
, u8
**decsym
)
936 * Populate an inode based on info from mds. May be called on new or
939 int ceph_fill_inode(struct inode
*inode
, struct page
*locked_page
,
940 struct ceph_mds_reply_info_in
*iinfo
,
941 struct ceph_mds_reply_dirfrag
*dirinfo
,
942 struct ceph_mds_session
*session
, int cap_fmode
,
943 struct ceph_cap_reservation
*caps_reservation
)
945 struct ceph_mds_client
*mdsc
= ceph_sb_to_mdsc(inode
->i_sb
);
946 struct ceph_client
*cl
= mdsc
->fsc
->client
;
947 struct ceph_mds_reply_inode
*info
= iinfo
->in
;
948 struct ceph_inode_info
*ci
= ceph_inode(inode
);
949 int issued
, new_issued
, info_caps
;
950 struct timespec64 mtime
, atime
, ctime
;
951 struct ceph_buffer
*xattr_blob
= NULL
;
952 struct ceph_buffer
*old_blob
= NULL
;
953 struct ceph_string
*pool_ns
= NULL
;
954 struct ceph_cap
*new_cap
= NULL
;
957 bool queue_trunc
= false;
958 bool new_version
= false;
959 bool fill_inline
= false;
960 umode_t mode
= le32_to_cpu(info
->mode
);
961 dev_t rdev
= le32_to_cpu(info
->rdev
);
963 lockdep_assert_held(&mdsc
->snap_rwsem
);
965 doutc(cl
, "%p ino %llx.%llx v %llu had %llu\n", inode
, ceph_vinop(inode
),
966 le64_to_cpu(info
->version
), ci
->i_version
);
968 /* Once I_NEW is cleared, we can't change type or dev numbers */
969 if (inode
->i_state
& I_NEW
) {
970 inode
->i_mode
= mode
;
972 if (inode_wrong_type(inode
, mode
)) {
973 pr_warn_once_client(cl
,
974 "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
975 ceph_vinop(inode
), inode
->i_mode
, mode
);
979 if ((S_ISCHR(mode
) || S_ISBLK(mode
)) && inode
->i_rdev
!= rdev
) {
980 pr_warn_once_client(cl
,
981 "dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
982 ceph_vinop(inode
), MAJOR(inode
->i_rdev
),
983 MINOR(inode
->i_rdev
), MAJOR(rdev
),
989 info_caps
= le32_to_cpu(info
->cap
.caps
);
991 /* prealloc new cap struct */
992 if (info_caps
&& ceph_snap(inode
) == CEPH_NOSNAP
) {
993 new_cap
= ceph_get_cap(mdsc
, caps_reservation
);
999 * prealloc xattr data, if it looks like we'll need it. only
1000 * if len > 4 (meaning there are actually xattrs; the first 4
1001 * bytes are the xattr count).
1003 if (iinfo
->xattr_len
> 4) {
1004 xattr_blob
= ceph_buffer_new(iinfo
->xattr_len
, GFP_NOFS
);
1006 pr_err_client(cl
, "ENOMEM xattr blob %d bytes\n",
1010 if (iinfo
->pool_ns_len
> 0)
1011 pool_ns
= ceph_find_or_create_string(iinfo
->pool_ns_data
,
1012 iinfo
->pool_ns_len
);
1014 if (ceph_snap(inode
) != CEPH_NOSNAP
&& !ci
->i_snapid_map
)
1015 ci
->i_snapid_map
= ceph_get_snapid_map(mdsc
, ceph_snap(inode
));
1017 spin_lock(&ci
->i_ceph_lock
);
1020 * provided version will be odd if inode value is projected,
1021 * even if stable. skip the update if we have newer stable
1022 * info (ours>=theirs, e.g. due to racing mds replies), unless
1023 * we are getting projected (unstable) info (in which case the
1024 * version is odd, and we want ours>theirs).
1030 if (ci
->i_version
== 0 ||
1031 ((info
->cap
.flags
& CEPH_CAP_FLAG_AUTH
) &&
1032 le64_to_cpu(info
->version
) > (ci
->i_version
& ~1)))
1035 /* Update change_attribute */
1036 inode_set_max_iversion_raw(inode
, iinfo
->change_attr
);
1038 __ceph_caps_issued(ci
, &issued
);
1039 issued
|= __ceph_caps_dirty(ci
);
1040 new_issued
= ~issued
& info_caps
;
1042 __ceph_update_quota(ci
, iinfo
->max_bytes
, iinfo
->max_files
);
1044 #ifdef CONFIG_FS_ENCRYPTION
1045 if (iinfo
->fscrypt_auth_len
&&
1046 ((inode
->i_state
& I_NEW
) || (ci
->fscrypt_auth_len
== 0))) {
1047 kfree(ci
->fscrypt_auth
);
1048 ci
->fscrypt_auth_len
= iinfo
->fscrypt_auth_len
;
1049 ci
->fscrypt_auth
= iinfo
->fscrypt_auth
;
1050 iinfo
->fscrypt_auth
= NULL
;
1051 iinfo
->fscrypt_auth_len
= 0;
1052 inode_set_flags(inode
, S_ENCRYPTED
, S_ENCRYPTED
);
1056 if ((new_version
|| (new_issued
& CEPH_CAP_AUTH_SHARED
)) &&
1057 (issued
& CEPH_CAP_AUTH_EXCL
) == 0) {
1058 inode
->i_mode
= mode
;
1059 inode
->i_uid
= make_kuid(&init_user_ns
, le32_to_cpu(info
->uid
));
1060 inode
->i_gid
= make_kgid(&init_user_ns
, le32_to_cpu(info
->gid
));
1061 doutc(cl
, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode
,
1062 ceph_vinop(inode
), inode
->i_mode
,
1063 from_kuid(&init_user_ns
, inode
->i_uid
),
1064 from_kgid(&init_user_ns
, inode
->i_gid
));
1065 ceph_decode_timespec64(&ci
->i_btime
, &iinfo
->btime
);
1066 ceph_decode_timespec64(&ci
->i_snap_btime
, &iinfo
->snap_btime
);
1069 /* directories have fl_stripe_unit set to zero */
1070 if (IS_ENCRYPTED(inode
))
1071 inode
->i_blkbits
= CEPH_FSCRYPT_BLOCK_SHIFT
;
1072 else if (le32_to_cpu(info
->layout
.fl_stripe_unit
))
1074 fls(le32_to_cpu(info
->layout
.fl_stripe_unit
)) - 1;
1076 inode
->i_blkbits
= CEPH_BLOCK_SHIFT
;
1078 if ((new_version
|| (new_issued
& CEPH_CAP_LINK_SHARED
)) &&
1079 (issued
& CEPH_CAP_LINK_EXCL
) == 0)
1080 set_nlink(inode
, le32_to_cpu(info
->nlink
));
1082 if (new_version
|| (new_issued
& CEPH_CAP_ANY_RD
)) {
1083 /* be careful with mtime, atime, size */
1084 ceph_decode_timespec64(&atime
, &info
->atime
);
1085 ceph_decode_timespec64(&mtime
, &info
->mtime
);
1086 ceph_decode_timespec64(&ctime
, &info
->ctime
);
1087 ceph_fill_file_time(inode
, issued
,
1088 le32_to_cpu(info
->time_warp_seq
),
1089 &ctime
, &mtime
, &atime
);
1092 if (new_version
|| (info_caps
& CEPH_CAP_FILE_SHARED
)) {
1093 ci
->i_files
= le64_to_cpu(info
->files
);
1094 ci
->i_subdirs
= le64_to_cpu(info
->subdirs
);
1098 (new_issued
& (CEPH_CAP_ANY_FILE_RD
| CEPH_CAP_ANY_FILE_WR
))) {
1099 u64 size
= le64_to_cpu(info
->size
);
1100 s64 old_pool
= ci
->i_layout
.pool_id
;
1101 struct ceph_string
*old_ns
;
1103 ceph_file_layout_from_legacy(&ci
->i_layout
, &info
->layout
);
1104 old_ns
= rcu_dereference_protected(ci
->i_layout
.pool_ns
,
1105 lockdep_is_held(&ci
->i_ceph_lock
));
1106 rcu_assign_pointer(ci
->i_layout
.pool_ns
, pool_ns
);
1108 if (ci
->i_layout
.pool_id
!= old_pool
|| pool_ns
!= old_ns
)
1109 ci
->i_ceph_flags
&= ~CEPH_I_POOL_PERM
;
1113 if (IS_ENCRYPTED(inode
) && size
&&
1114 iinfo
->fscrypt_file_len
== sizeof(__le64
)) {
1115 u64 fsize
= __le64_to_cpu(*(__le64
*)iinfo
->fscrypt_file
);
1117 if (size
== round_up(fsize
, CEPH_FSCRYPT_BLOCK_SIZE
)) {
1121 "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1126 queue_trunc
= ceph_fill_file_size(inode
, issued
,
1127 le32_to_cpu(info
->truncate_seq
),
1128 le64_to_cpu(info
->truncate_size
),
1130 /* only update max_size on auth cap */
1131 if ((info
->cap
.flags
& CEPH_CAP_FLAG_AUTH
) &&
1132 ci
->i_max_size
!= le64_to_cpu(info
->max_size
)) {
1133 doutc(cl
, "max_size %lld -> %llu\n",
1134 ci
->i_max_size
, le64_to_cpu(info
->max_size
));
1135 ci
->i_max_size
= le64_to_cpu(info
->max_size
);
1139 /* layout and rstat are not tracked by capability, update them if
1140 * the inode info is from auth mds */
1141 if (new_version
|| (info
->cap
.flags
& CEPH_CAP_FLAG_AUTH
)) {
1142 if (S_ISDIR(inode
->i_mode
)) {
1143 ci
->i_dir_layout
= iinfo
->dir_layout
;
1144 ci
->i_rbytes
= le64_to_cpu(info
->rbytes
);
1145 ci
->i_rfiles
= le64_to_cpu(info
->rfiles
);
1146 ci
->i_rsubdirs
= le64_to_cpu(info
->rsubdirs
);
1147 ci
->i_dir_pin
= iinfo
->dir_pin
;
1148 ci
->i_rsnaps
= iinfo
->rsnaps
;
1149 ceph_decode_timespec64(&ci
->i_rctime
, &info
->rctime
);
1154 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1155 if ((ci
->i_xattrs
.version
== 0 || !(issued
& CEPH_CAP_XATTR_EXCL
)) &&
1156 le64_to_cpu(info
->xattr_version
) > ci
->i_xattrs
.version
) {
1157 if (ci
->i_xattrs
.blob
)
1158 old_blob
= ci
->i_xattrs
.blob
;
1159 ci
->i_xattrs
.blob
= xattr_blob
;
1161 memcpy(ci
->i_xattrs
.blob
->vec
.iov_base
,
1162 iinfo
->xattr_data
, iinfo
->xattr_len
);
1163 ci
->i_xattrs
.version
= le64_to_cpu(info
->xattr_version
);
1164 ceph_forget_all_cached_acls(inode
);
1165 ceph_security_invalidate_secctx(inode
);
1169 /* finally update i_version */
1170 if (le64_to_cpu(info
->version
) > ci
->i_version
)
1171 ci
->i_version
= le64_to_cpu(info
->version
);
1173 inode
->i_mapping
->a_ops
= &ceph_aops
;
1175 switch (inode
->i_mode
& S_IFMT
) {
1180 inode
->i_blkbits
= PAGE_SHIFT
;
1181 init_special_inode(inode
, inode
->i_mode
, rdev
);
1182 inode
->i_op
= &ceph_file_iops
;
1185 inode
->i_op
= &ceph_file_iops
;
1186 inode
->i_fop
= &ceph_file_fops
;
1189 if (!ci
->i_symlink
) {
1190 u32 symlen
= iinfo
->symlink_len
;
1193 spin_unlock(&ci
->i_ceph_lock
);
1195 if (IS_ENCRYPTED(inode
)) {
1196 if (symlen
!= i_size_read(inode
))
1198 "%p %llx.%llx BAD symlink size %lld\n",
1199 inode
, ceph_vinop(inode
),
1200 i_size_read(inode
));
1202 err
= decode_encrypted_symlink(mdsc
, iinfo
->symlink
,
1203 symlen
, (u8
**)&sym
);
1206 "decoding encrypted symlink failed: %d\n",
1211 i_size_write(inode
, symlen
);
1212 inode
->i_blocks
= calc_inode_blocks(symlen
);
1214 if (symlen
!= i_size_read(inode
)) {
1216 "%p %llx.%llx BAD symlink size %lld\n",
1217 inode
, ceph_vinop(inode
),
1218 i_size_read(inode
));
1219 i_size_write(inode
, symlen
);
1220 inode
->i_blocks
= calc_inode_blocks(symlen
);
1224 sym
= kstrndup(iinfo
->symlink
, symlen
, GFP_NOFS
);
1229 spin_lock(&ci
->i_ceph_lock
);
1231 ci
->i_symlink
= sym
;
1233 kfree(sym
); /* lost a race */
1236 if (IS_ENCRYPTED(inode
)) {
1238 * Encrypted symlinks need to be decrypted before we can
1239 * cache their targets in i_link. Don't touch it here.
1241 inode
->i_op
= &ceph_encrypted_symlink_iops
;
1243 inode
->i_link
= ci
->i_symlink
;
1244 inode
->i_op
= &ceph_symlink_iops
;
1248 inode
->i_op
= &ceph_dir_iops
;
1249 inode
->i_fop
= &ceph_dir_fops
;
1252 pr_err_client(cl
, "%p %llx.%llx BAD mode 0%o\n", inode
,
1253 ceph_vinop(inode
), inode
->i_mode
);
1256 /* were we issued a capability? */
1258 if (ceph_snap(inode
) == CEPH_NOSNAP
) {
1259 ceph_add_cap(inode
, session
,
1260 le64_to_cpu(info
->cap
.cap_id
),
1262 le32_to_cpu(info
->cap
.wanted
),
1263 le32_to_cpu(info
->cap
.seq
),
1264 le32_to_cpu(info
->cap
.mseq
),
1265 le64_to_cpu(info
->cap
.realm
),
1266 info
->cap
.flags
, &new_cap
);
1268 /* set dir completion flag? */
1269 if (S_ISDIR(inode
->i_mode
) &&
1270 ci
->i_files
== 0 && ci
->i_subdirs
== 0 &&
1271 (info_caps
& CEPH_CAP_FILE_SHARED
) &&
1272 (issued
& CEPH_CAP_FILE_EXCL
) == 0 &&
1273 !__ceph_dir_is_complete(ci
)) {
1274 doutc(cl
, " marking %p complete (empty)\n",
1276 i_size_write(inode
, 0);
1277 __ceph_dir_set_complete(ci
,
1278 atomic64_read(&ci
->i_release_count
),
1279 atomic64_read(&ci
->i_ordered_count
));
1284 doutc(cl
, " %p got snap_caps %s\n", inode
,
1285 ceph_cap_string(info_caps
));
1286 ci
->i_snap_caps
|= info_caps
;
1290 if (iinfo
->inline_version
> 0 &&
1291 iinfo
->inline_version
>= ci
->i_inline_version
) {
1292 int cache_caps
= CEPH_CAP_FILE_CACHE
| CEPH_CAP_FILE_LAZYIO
;
1293 ci
->i_inline_version
= iinfo
->inline_version
;
1294 if (ceph_has_inline_data(ci
) &&
1295 (locked_page
|| (info_caps
& cache_caps
)))
1299 if (cap_fmode
>= 0) {
1301 pr_warn_client(cl
, "mds issued no caps on %llx.%llx\n",
1303 __ceph_touch_fmode(ci
, mdsc
, cap_fmode
);
1306 spin_unlock(&ci
->i_ceph_lock
);
1308 ceph_fscache_register_inode_cookie(inode
);
1311 ceph_fill_inline_data(inode
, locked_page
,
1312 iinfo
->inline_data
, iinfo
->inline_len
);
1315 wake_up_all(&ci
->i_cap_wq
);
1317 /* queue truncate if we saw i_size decrease */
1319 ceph_queue_vmtruncate(inode
);
1321 /* populate frag tree */
1322 if (S_ISDIR(inode
->i_mode
))
1323 ceph_fill_fragtree(inode
, &info
->fragtree
, dirinfo
);
1325 /* update delegation info? */
1327 ceph_fill_dirfrag(inode
, dirinfo
);
1332 ceph_put_cap(mdsc
, new_cap
);
1333 ceph_buffer_put(old_blob
);
1334 ceph_buffer_put(xattr_blob
);
1335 ceph_put_string(pool_ns
);
1340 * caller should hold session s_mutex and dentry->d_lock.
1342 static void __update_dentry_lease(struct inode
*dir
, struct dentry
*dentry
,
1343 struct ceph_mds_reply_lease
*lease
,
1344 struct ceph_mds_session
*session
,
1345 unsigned long from_time
,
1346 struct ceph_mds_session
**old_lease_session
)
1348 struct ceph_client
*cl
= ceph_inode_to_client(dir
);
1349 struct ceph_dentry_info
*di
= ceph_dentry(dentry
);
1350 unsigned mask
= le16_to_cpu(lease
->mask
);
1351 long unsigned duration
= le32_to_cpu(lease
->duration_ms
);
1352 long unsigned ttl
= from_time
+ (duration
* HZ
) / 1000;
1353 long unsigned half_ttl
= from_time
+ (duration
* HZ
/ 2) / 1000;
1355 doutc(cl
, "%p duration %lu ms ttl %lu\n", dentry
, duration
, ttl
);
1357 /* only track leases on regular dentries */
1358 if (ceph_snap(dir
) != CEPH_NOSNAP
)
1361 if (mask
& CEPH_LEASE_PRIMARY_LINK
)
1362 di
->flags
|= CEPH_DENTRY_PRIMARY_LINK
;
1364 di
->flags
&= ~CEPH_DENTRY_PRIMARY_LINK
;
1366 di
->lease_shared_gen
= atomic_read(&ceph_inode(dir
)->i_shared_gen
);
1367 if (!(mask
& CEPH_LEASE_VALID
)) {
1368 __ceph_dentry_dir_lease_touch(di
);
1372 if (di
->lease_gen
== atomic_read(&session
->s_cap_gen
) &&
1373 time_before(ttl
, di
->time
))
1374 return; /* we already have a newer lease. */
1376 if (di
->lease_session
&& di
->lease_session
!= session
) {
1377 *old_lease_session
= di
->lease_session
;
1378 di
->lease_session
= NULL
;
1381 if (!di
->lease_session
)
1382 di
->lease_session
= ceph_get_mds_session(session
);
1383 di
->lease_gen
= atomic_read(&session
->s_cap_gen
);
1384 di
->lease_seq
= le32_to_cpu(lease
->seq
);
1385 di
->lease_renew_after
= half_ttl
;
1386 di
->lease_renew_from
= 0;
1389 __ceph_dentry_lease_touch(di
);
1392 static inline void update_dentry_lease(struct inode
*dir
, struct dentry
*dentry
,
1393 struct ceph_mds_reply_lease
*lease
,
1394 struct ceph_mds_session
*session
,
1395 unsigned long from_time
)
1397 struct ceph_mds_session
*old_lease_session
= NULL
;
1398 spin_lock(&dentry
->d_lock
);
1399 __update_dentry_lease(dir
, dentry
, lease
, session
, from_time
,
1400 &old_lease_session
);
1401 spin_unlock(&dentry
->d_lock
);
1402 ceph_put_mds_session(old_lease_session
);
1406 * update dentry lease without having parent inode locked
1408 static void update_dentry_lease_careful(struct dentry
*dentry
,
1409 struct ceph_mds_reply_lease
*lease
,
1410 struct ceph_mds_session
*session
,
1411 unsigned long from_time
,
1412 char *dname
, u32 dname_len
,
1413 struct ceph_vino
*pdvino
,
1414 struct ceph_vino
*ptvino
)
1418 struct ceph_mds_session
*old_lease_session
= NULL
;
1420 spin_lock(&dentry
->d_lock
);
1421 /* make sure dentry's name matches target */
1422 if (dentry
->d_name
.len
!= dname_len
||
1423 memcmp(dentry
->d_name
.name
, dname
, dname_len
))
1426 dir
= d_inode(dentry
->d_parent
);
1427 /* make sure parent matches dvino */
1428 if (!ceph_ino_compare(dir
, pdvino
))
1431 /* make sure dentry's inode matches target. NULL ptvino means that
1432 * we expect a negative dentry */
1434 if (d_really_is_negative(dentry
))
1436 if (!ceph_ino_compare(d_inode(dentry
), ptvino
))
1439 if (d_really_is_positive(dentry
))
1443 __update_dentry_lease(dir
, dentry
, lease
, session
,
1444 from_time
, &old_lease_session
);
1446 spin_unlock(&dentry
->d_lock
);
1447 ceph_put_mds_session(old_lease_session
);
1451 * splice a dentry to an inode.
1452 * caller must hold directory i_rwsem for this to be safe.
1454 static int splice_dentry(struct dentry
**pdn
, struct inode
*in
)
1456 struct ceph_client
*cl
= ceph_inode_to_client(in
);
1457 struct dentry
*dn
= *pdn
;
1458 struct dentry
*realdn
;
1460 BUG_ON(d_inode(dn
));
1462 if (S_ISDIR(in
->i_mode
)) {
1463 /* If inode is directory, d_splice_alias() below will remove
1464 * 'realdn' from its origin parent. We need to ensure that
1465 * origin parent's readdir cache will not reference 'realdn'
1467 realdn
= d_find_any_alias(in
);
1469 struct ceph_dentry_info
*di
= ceph_dentry(realdn
);
1470 spin_lock(&realdn
->d_lock
);
1472 realdn
->d_op
->d_prune(realdn
);
1475 di
->lease_shared_gen
= 0;
1478 spin_unlock(&realdn
->d_lock
);
1483 /* dn must be unhashed */
1484 if (!d_unhashed(dn
))
1486 realdn
= d_splice_alias(in
, dn
);
1487 if (IS_ERR(realdn
)) {
1488 pr_err_client(cl
, "error %ld %p inode %p ino %llx.%llx\n",
1489 PTR_ERR(realdn
), dn
, in
, ceph_vinop(in
));
1490 return PTR_ERR(realdn
);
1494 doutc(cl
, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
1495 dn
, d_count(dn
), realdn
, d_count(realdn
),
1496 d_inode(realdn
), ceph_vinop(d_inode(realdn
)));
1500 BUG_ON(!ceph_dentry(dn
));
1501 doutc(cl
, "dn %p attached to %p ino %llx.%llx\n", dn
,
1502 d_inode(dn
), ceph_vinop(d_inode(dn
)));
1508 * Incorporate results into the local cache. This is either just
1509 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1512 * A reply may contain
1513 * a directory inode along with a dentry.
1514 * and/or a target inode
1516 * Called with snap_rwsem (read).
1518 int ceph_fill_trace(struct super_block
*sb
, struct ceph_mds_request
*req
)
1520 struct ceph_mds_session
*session
= req
->r_session
;
1521 struct ceph_mds_reply_info_parsed
*rinfo
= &req
->r_reply_info
;
1522 struct inode
*in
= NULL
;
1523 struct ceph_vino tvino
, dvino
;
1524 struct ceph_fs_client
*fsc
= ceph_sb_to_fs_client(sb
);
1525 struct ceph_client
*cl
= fsc
->client
;
1528 doutc(cl
, "%p is_dentry %d is_target %d\n", req
,
1529 rinfo
->head
->is_dentry
, rinfo
->head
->is_target
);
1531 if (!rinfo
->head
->is_target
&& !rinfo
->head
->is_dentry
) {
1532 doutc(cl
, "reply is empty!\n");
1533 if (rinfo
->head
->result
== 0 && req
->r_parent
)
1534 ceph_invalidate_dir_request(req
);
1538 if (rinfo
->head
->is_dentry
) {
1539 struct inode
*dir
= req
->r_parent
;
1542 err
= ceph_fill_inode(dir
, NULL
, &rinfo
->diri
,
1543 rinfo
->dirfrag
, session
, -1,
1544 &req
->r_caps_reservation
);
1551 if (dir
&& req
->r_op
== CEPH_MDS_OP_LOOKUPNAME
&&
1552 test_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
) &&
1553 !test_bit(CEPH_MDS_R_ABORTED
, &req
->r_req_flags
)) {
1554 bool is_nokey
= false;
1556 struct dentry
*dn
, *parent
;
1557 struct fscrypt_str oname
= FSTR_INIT(NULL
, 0);
1558 struct ceph_fname fname
= { .dir
= dir
,
1559 .name
= rinfo
->dname
,
1560 .ctext
= rinfo
->altname
,
1561 .name_len
= rinfo
->dname_len
,
1562 .ctext_len
= rinfo
->altname_len
};
1564 BUG_ON(!rinfo
->head
->is_target
);
1565 BUG_ON(req
->r_dentry
);
1567 parent
= d_find_any_alias(dir
);
1570 err
= ceph_fname_alloc_buffer(dir
, &oname
);
1576 err
= ceph_fname_to_usr(&fname
, NULL
, &oname
, &is_nokey
);
1579 ceph_fname_free_buffer(dir
, &oname
);
1582 dname
.name
= oname
.name
;
1583 dname
.len
= oname
.len
;
1584 dname
.hash
= full_name_hash(parent
, dname
.name
, dname
.len
);
1585 tvino
.ino
= le64_to_cpu(rinfo
->targeti
.in
->ino
);
1586 tvino
.snap
= le64_to_cpu(rinfo
->targeti
.in
->snapid
);
1588 dn
= d_lookup(parent
, &dname
);
1589 doutc(cl
, "d_lookup on parent=%p name=%.*s got %p\n",
1590 parent
, dname
.len
, dname
.name
, dn
);
1593 dn
= d_alloc(parent
, &dname
);
1594 doutc(cl
, "d_alloc %p '%.*s' = %p\n", parent
,
1595 dname
.len
, dname
.name
, dn
);
1598 ceph_fname_free_buffer(dir
, &oname
);
1603 spin_lock(&dn
->d_lock
);
1604 dn
->d_flags
|= DCACHE_NOKEY_NAME
;
1605 spin_unlock(&dn
->d_lock
);
1608 } else if (d_really_is_positive(dn
) &&
1609 (ceph_ino(d_inode(dn
)) != tvino
.ino
||
1610 ceph_snap(d_inode(dn
)) != tvino
.snap
)) {
1611 doutc(cl
, " dn %p points to wrong inode %p\n",
1613 ceph_dir_clear_ordered(dir
);
1618 ceph_fname_free_buffer(dir
, &oname
);
1625 if (rinfo
->head
->is_target
) {
1626 /* Should be filled in by handle_reply */
1627 BUG_ON(!req
->r_target_inode
);
1629 in
= req
->r_target_inode
;
1630 err
= ceph_fill_inode(in
, req
->r_locked_page
, &rinfo
->targeti
,
1632 (!test_bit(CEPH_MDS_R_ABORTED
, &req
->r_req_flags
) &&
1633 !test_bit(CEPH_MDS_R_ASYNC
, &req
->r_req_flags
) &&
1634 rinfo
->head
->result
== 0) ? req
->r_fmode
: -1,
1635 &req
->r_caps_reservation
);
1637 pr_err_client(cl
, "badness %p %llx.%llx\n", in
,
1639 req
->r_target_inode
= NULL
;
1640 if (in
->i_state
& I_NEW
)
1641 discard_new_inode(in
);
1646 if (in
->i_state
& I_NEW
)
1647 unlock_new_inode(in
);
1651 * ignore null lease/binding on snapdir ENOENT, or else we
1652 * will have trouble splicing in the virtual snapdir later
1654 if (rinfo
->head
->is_dentry
&&
1655 !test_bit(CEPH_MDS_R_ABORTED
, &req
->r_req_flags
) &&
1656 test_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
) &&
1657 (rinfo
->head
->is_target
|| strncmp(req
->r_dentry
->d_name
.name
,
1658 fsc
->mount_options
->snapdir_name
,
1659 req
->r_dentry
->d_name
.len
))) {
1661 * lookup link rename : null -> possibly existing inode
1662 * mknod symlink mkdir : null -> new inode
1663 * unlink : linked -> null
1665 struct inode
*dir
= req
->r_parent
;
1666 struct dentry
*dn
= req
->r_dentry
;
1667 bool have_dir_cap
, have_lease
;
1671 BUG_ON(d_inode(dn
->d_parent
) != dir
);
1673 dvino
.ino
= le64_to_cpu(rinfo
->diri
.in
->ino
);
1674 dvino
.snap
= le64_to_cpu(rinfo
->diri
.in
->snapid
);
1676 BUG_ON(ceph_ino(dir
) != dvino
.ino
);
1677 BUG_ON(ceph_snap(dir
) != dvino
.snap
);
1679 /* do we have a lease on the whole dir? */
1681 (le32_to_cpu(rinfo
->diri
.in
->cap
.caps
) &
1682 CEPH_CAP_FILE_SHARED
);
1684 /* do we have a dn lease? */
1685 have_lease
= have_dir_cap
||
1686 le32_to_cpu(rinfo
->dlease
->duration_ms
);
1688 doutc(cl
, "no dentry lease or dir cap\n");
1691 if (req
->r_old_dentry
&& req
->r_op
== CEPH_MDS_OP_RENAME
) {
1692 struct inode
*olddir
= req
->r_old_dentry_dir
;
1695 doutc(cl
, " src %p '%pd' dst %p '%pd'\n",
1696 req
->r_old_dentry
, req
->r_old_dentry
, dn
, dn
);
1697 doutc(cl
, "doing d_move %p -> %p\n", req
->r_old_dentry
, dn
);
1699 /* d_move screws up sibling dentries' offsets */
1700 ceph_dir_clear_ordered(dir
);
1701 ceph_dir_clear_ordered(olddir
);
1703 d_move(req
->r_old_dentry
, dn
);
1704 doutc(cl
, " src %p '%pd' dst %p '%pd'\n",
1705 req
->r_old_dentry
, req
->r_old_dentry
, dn
, dn
);
1707 /* ensure target dentry is invalidated, despite
1708 rehashing bug in vfs_rename_dir */
1709 ceph_invalidate_dentry_lease(dn
);
1711 doutc(cl
, "dn %p gets new offset %lld\n",
1713 ceph_dentry(req
->r_old_dentry
)->offset
);
1715 /* swap r_dentry and r_old_dentry in case that
1716 * splice_dentry() gets called later. This is safe
1717 * because no other place will use them */
1718 req
->r_dentry
= req
->r_old_dentry
;
1719 req
->r_old_dentry
= dn
;
1724 if (!rinfo
->head
->is_target
) {
1725 doutc(cl
, "null dentry\n");
1726 if (d_really_is_positive(dn
)) {
1727 doutc(cl
, "d_delete %p\n", dn
);
1728 ceph_dir_clear_ordered(dir
);
1730 } else if (have_lease
) {
1735 if (!d_unhashed(dn
) && have_lease
)
1736 update_dentry_lease(dir
, dn
,
1737 rinfo
->dlease
, session
,
1738 req
->r_request_started
);
1742 /* attach proper inode */
1743 if (d_really_is_negative(dn
)) {
1744 ceph_dir_clear_ordered(dir
);
1746 err
= splice_dentry(&req
->r_dentry
, in
);
1749 dn
= req
->r_dentry
; /* may have spliced */
1750 } else if (d_really_is_positive(dn
) && d_inode(dn
) != in
) {
1751 doutc(cl
, " %p links to %p %llx.%llx, not %llx.%llx\n",
1752 dn
, d_inode(dn
), ceph_vinop(d_inode(dn
)),
1759 update_dentry_lease(dir
, dn
,
1760 rinfo
->dlease
, session
,
1761 req
->r_request_started
);
1763 doutc(cl
, " final dn %p\n", dn
);
1764 } else if ((req
->r_op
== CEPH_MDS_OP_LOOKUPSNAP
||
1765 req
->r_op
== CEPH_MDS_OP_MKSNAP
) &&
1766 test_bit(CEPH_MDS_R_PARENT_LOCKED
, &req
->r_req_flags
) &&
1767 !test_bit(CEPH_MDS_R_ABORTED
, &req
->r_req_flags
)) {
1768 struct inode
*dir
= req
->r_parent
;
1770 /* fill out a snapdir LOOKUPSNAP dentry */
1772 BUG_ON(ceph_snap(dir
) != CEPH_SNAPDIR
);
1773 BUG_ON(!req
->r_dentry
);
1774 doutc(cl
, " linking snapped dir %p to dn %p\n", in
,
1776 ceph_dir_clear_ordered(dir
);
1778 err
= splice_dentry(&req
->r_dentry
, in
);
1781 } else if (rinfo
->head
->is_dentry
&& req
->r_dentry
) {
1782 /* parent inode is not locked, be careful */
1783 struct ceph_vino
*ptvino
= NULL
;
1784 dvino
.ino
= le64_to_cpu(rinfo
->diri
.in
->ino
);
1785 dvino
.snap
= le64_to_cpu(rinfo
->diri
.in
->snapid
);
1786 if (rinfo
->head
->is_target
) {
1787 tvino
.ino
= le64_to_cpu(rinfo
->targeti
.in
->ino
);
1788 tvino
.snap
= le64_to_cpu(rinfo
->targeti
.in
->snapid
);
1791 update_dentry_lease_careful(req
->r_dentry
, rinfo
->dlease
,
1792 session
, req
->r_request_started
,
1793 rinfo
->dname
, rinfo
->dname_len
,
1797 doutc(cl
, "done err=%d\n", err
);
1802 * Prepopulate our cache with readdir results, leases, etc.
1804 static int readdir_prepopulate_inodes_only(struct ceph_mds_request
*req
,
1805 struct ceph_mds_session
*session
)
1807 struct ceph_mds_reply_info_parsed
*rinfo
= &req
->r_reply_info
;
1808 struct ceph_client
*cl
= session
->s_mdsc
->fsc
->client
;
1811 for (i
= 0; i
< rinfo
->dir_nr
; i
++) {
1812 struct ceph_mds_reply_dir_entry
*rde
= rinfo
->dir_entries
+ i
;
1813 struct ceph_vino vino
;
1817 vino
.ino
= le64_to_cpu(rde
->inode
.in
->ino
);
1818 vino
.snap
= le64_to_cpu(rde
->inode
.in
->snapid
);
1820 in
= ceph_get_inode(req
->r_dentry
->d_sb
, vino
, NULL
);
1823 doutc(cl
, "badness got %d\n", err
);
1826 rc
= ceph_fill_inode(in
, NULL
, &rde
->inode
, NULL
, session
,
1827 -1, &req
->r_caps_reservation
);
1829 pr_err_client(cl
, "inode badness on %p got %d\n", in
,
1832 if (in
->i_state
& I_NEW
) {
1834 discard_new_inode(in
);
1836 } else if (in
->i_state
& I_NEW
) {
1837 unlock_new_inode(in
);
1846 void ceph_readdir_cache_release(struct ceph_readdir_cache_control
*ctl
)
1850 put_page(ctl
->page
);
1855 static int fill_readdir_cache(struct inode
*dir
, struct dentry
*dn
,
1856 struct ceph_readdir_cache_control
*ctl
,
1857 struct ceph_mds_request
*req
)
1859 struct ceph_client
*cl
= ceph_inode_to_client(dir
);
1860 struct ceph_inode_info
*ci
= ceph_inode(dir
);
1861 unsigned nsize
= PAGE_SIZE
/ sizeof(struct dentry
*);
1862 unsigned idx
= ctl
->index
% nsize
;
1863 pgoff_t pgoff
= ctl
->index
/ nsize
;
1865 if (!ctl
->page
|| pgoff
!= ctl
->page
->index
) {
1866 ceph_readdir_cache_release(ctl
);
1868 ctl
->page
= grab_cache_page(&dir
->i_data
, pgoff
);
1870 ctl
->page
= find_lock_page(&dir
->i_data
, pgoff
);
1873 return idx
== 0 ? -ENOMEM
: 0;
1875 /* reading/filling the cache are serialized by
1876 * i_rwsem, no need to use page lock */
1877 unlock_page(ctl
->page
);
1878 ctl
->dentries
= kmap(ctl
->page
);
1880 memset(ctl
->dentries
, 0, PAGE_SIZE
);
1883 if (req
->r_dir_release_cnt
== atomic64_read(&ci
->i_release_count
) &&
1884 req
->r_dir_ordered_cnt
== atomic64_read(&ci
->i_ordered_count
)) {
1885 doutc(cl
, "dn %p idx %d\n", dn
, ctl
->index
);
1886 ctl
->dentries
[idx
] = dn
;
1889 doutc(cl
, "disable readdir cache\n");
1895 int ceph_readdir_prepopulate(struct ceph_mds_request
*req
,
1896 struct ceph_mds_session
*session
)
1898 struct dentry
*parent
= req
->r_dentry
;
1899 struct inode
*inode
= d_inode(parent
);
1900 struct ceph_inode_info
*ci
= ceph_inode(inode
);
1901 struct ceph_mds_reply_info_parsed
*rinfo
= &req
->r_reply_info
;
1902 struct ceph_client
*cl
= session
->s_mdsc
->fsc
->client
;
1906 int err
= 0, skipped
= 0, ret
, i
;
1907 u32 frag
= le32_to_cpu(req
->r_args
.readdir
.frag
);
1910 struct ceph_readdir_cache_control cache_ctl
= {};
1912 if (test_bit(CEPH_MDS_R_ABORTED
, &req
->r_req_flags
))
1913 return readdir_prepopulate_inodes_only(req
, session
);
1915 if (rinfo
->hash_order
) {
1917 last_hash
= ceph_str_hash(ci
->i_dir_layout
.dl_dir_hash
,
1919 strlen(req
->r_path2
));
1920 last_hash
= ceph_frag_value(last_hash
);
1921 } else if (rinfo
->offset_hash
) {
1922 /* mds understands offset_hash */
1923 WARN_ON_ONCE(req
->r_readdir_offset
!= 2);
1924 last_hash
= le32_to_cpu(req
->r_args
.readdir
.offset_hash
);
1928 if (rinfo
->dir_dir
&&
1929 le32_to_cpu(rinfo
->dir_dir
->frag
) != frag
) {
1930 doutc(cl
, "got new frag %x -> %x\n", frag
,
1931 le32_to_cpu(rinfo
->dir_dir
->frag
));
1932 frag
= le32_to_cpu(rinfo
->dir_dir
->frag
);
1933 if (!rinfo
->hash_order
)
1934 req
->r_readdir_offset
= 2;
1937 if (le32_to_cpu(rinfo
->head
->op
) == CEPH_MDS_OP_LSSNAP
) {
1938 doutc(cl
, "%d items under SNAPDIR dn %p\n",
1939 rinfo
->dir_nr
, parent
);
1941 doutc(cl
, "%d items under dn %p\n", rinfo
->dir_nr
, parent
);
1943 ceph_fill_dirfrag(d_inode(parent
), rinfo
->dir_dir
);
1945 if (ceph_frag_is_leftmost(frag
) &&
1946 req
->r_readdir_offset
== 2 &&
1947 !(rinfo
->hash_order
&& last_hash
)) {
1948 /* note dir version at start of readdir so we can
1949 * tell if any dentries get dropped */
1950 req
->r_dir_release_cnt
=
1951 atomic64_read(&ci
->i_release_count
);
1952 req
->r_dir_ordered_cnt
=
1953 atomic64_read(&ci
->i_ordered_count
);
1954 req
->r_readdir_cache_idx
= 0;
1958 cache_ctl
.index
= req
->r_readdir_cache_idx
;
1959 fpos_offset
= req
->r_readdir_offset
;
1961 /* FIXME: release caps/leases if error occurs */
1962 for (i
= 0; i
< rinfo
->dir_nr
; i
++) {
1963 struct ceph_mds_reply_dir_entry
*rde
= rinfo
->dir_entries
+ i
;
1964 struct ceph_vino tvino
;
1966 dname
.name
= rde
->name
;
1967 dname
.len
= rde
->name_len
;
1968 dname
.hash
= full_name_hash(parent
, dname
.name
, dname
.len
);
1970 tvino
.ino
= le64_to_cpu(rde
->inode
.in
->ino
);
1971 tvino
.snap
= le64_to_cpu(rde
->inode
.in
->snapid
);
1973 if (rinfo
->hash_order
) {
1974 u32 hash
= ceph_frag_value(rde
->raw_hash
);
1975 if (hash
!= last_hash
)
1978 rde
->offset
= ceph_make_fpos(hash
, fpos_offset
++, true);
1980 rde
->offset
= ceph_make_fpos(frag
, fpos_offset
++, false);
1984 dn
= d_lookup(parent
, &dname
);
1985 doutc(cl
, "d_lookup on parent=%p name=%.*s got %p\n",
1986 parent
, dname
.len
, dname
.name
, dn
);
1989 dn
= d_alloc(parent
, &dname
);
1990 doutc(cl
, "d_alloc %p '%.*s' = %p\n", parent
,
1991 dname
.len
, dname
.name
, dn
);
1993 doutc(cl
, "d_alloc badness\n");
1997 if (rde
->is_nokey
) {
1998 spin_lock(&dn
->d_lock
);
1999 dn
->d_flags
|= DCACHE_NOKEY_NAME
;
2000 spin_unlock(&dn
->d_lock
);
2002 } else if (d_really_is_positive(dn
) &&
2003 (ceph_ino(d_inode(dn
)) != tvino
.ino
||
2004 ceph_snap(d_inode(dn
)) != tvino
.snap
)) {
2005 struct ceph_dentry_info
*di
= ceph_dentry(dn
);
2006 doutc(cl
, " dn %p points to wrong inode %p\n",
2009 spin_lock(&dn
->d_lock
);
2010 if (di
->offset
> 0 &&
2011 di
->lease_shared_gen
==
2012 atomic_read(&ci
->i_shared_gen
)) {
2013 __ceph_dir_clear_ordered(ci
);
2016 spin_unlock(&dn
->d_lock
);
2024 if (d_really_is_positive(dn
)) {
2027 in
= ceph_get_inode(parent
->d_sb
, tvino
, NULL
);
2029 doutc(cl
, "new_inode badness\n");
2037 ret
= ceph_fill_inode(in
, NULL
, &rde
->inode
, NULL
, session
,
2038 -1, &req
->r_caps_reservation
);
2040 pr_err_client(cl
, "badness on %p %llx.%llx\n", in
,
2042 if (d_really_is_negative(dn
)) {
2043 if (in
->i_state
& I_NEW
) {
2045 discard_new_inode(in
);
2053 if (in
->i_state
& I_NEW
)
2054 unlock_new_inode(in
);
2056 if (d_really_is_negative(dn
)) {
2057 if (ceph_security_xattr_deadlock(in
)) {
2058 doutc(cl
, " skip splicing dn %p to inode %p"
2059 " (security xattr deadlock)\n", dn
, in
);
2065 err
= splice_dentry(&dn
, in
);
2070 ceph_dentry(dn
)->offset
= rde
->offset
;
2072 update_dentry_lease(d_inode(parent
), dn
,
2073 rde
->lease
, req
->r_session
,
2074 req
->r_request_started
);
2076 if (err
== 0 && skipped
== 0 && cache_ctl
.index
>= 0) {
2077 ret
= fill_readdir_cache(d_inode(parent
), dn
,
2086 if (err
== 0 && skipped
== 0) {
2087 set_bit(CEPH_MDS_R_DID_PREPOPULATE
, &req
->r_req_flags
);
2088 req
->r_readdir_cache_idx
= cache_ctl
.index
;
2090 ceph_readdir_cache_release(&cache_ctl
);
2091 doutc(cl
, "done\n");
2095 bool ceph_inode_set_size(struct inode
*inode
, loff_t size
)
2097 struct ceph_client
*cl
= ceph_inode_to_client(inode
);
2098 struct ceph_inode_info
*ci
= ceph_inode(inode
);
2101 spin_lock(&ci
->i_ceph_lock
);
2102 doutc(cl
, "set_size %p %llu -> %llu\n", inode
, i_size_read(inode
), size
);
2103 i_size_write(inode
, size
);
2104 ceph_fscache_update(inode
);
2105 inode
->i_blocks
= calc_inode_blocks(size
);
2107 ret
= __ceph_should_report_size(ci
);
2109 spin_unlock(&ci
->i_ceph_lock
);
2114 void ceph_queue_inode_work(struct inode
*inode
, int work_bit
)
2116 struct ceph_fs_client
*fsc
= ceph_inode_to_fs_client(inode
);
2117 struct ceph_client
*cl
= fsc
->client
;
2118 struct ceph_inode_info
*ci
= ceph_inode(inode
);
2119 set_bit(work_bit
, &ci
->i_work_mask
);
2122 if (queue_work(fsc
->inode_wq
, &ci
->i_work
)) {
2123 doutc(cl
, "%p %llx.%llx mask=%lx\n", inode
,
2124 ceph_vinop(inode
), ci
->i_work_mask
);
2126 doutc(cl
, "%p %llx.%llx already queued, mask=%lx\n",
2127 inode
, ceph_vinop(inode
), ci
->i_work_mask
);
static void ceph_do_invalidate_pages(struct inode *inode)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 orig_gen;
	int check = 0;

	ceph_fscache_invalidate(inode, false);

	mutex_lock(&ci->i_truncate_mutex);

	if (ceph_inode_is_shutdown(inode)) {
		pr_warn_ratelimited_client(cl,
			"%p %llx.%llx is shut down\n", inode,
			ceph_vinop(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	spin_lock(&ci->i_ceph_lock);
	doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
	      ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
			      ceph_vinop(inode));
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
		      ceph_vinop(inode), ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
	} else {
		doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
		      inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
		      ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0);
}

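/*
 * ceph_do_invalidate_pages() runs from the inode workqueue (see the
 * CEPH_I_WORK_INVALIDATE_PAGES handling in ceph_inode_work() below).
 * Comparing i_rdcache_gen against i_rdcache_revoking before and after the
 * invalidation is how it detects that a new cap grant raced with it, in
 * which case the remaining work is left for a later ceph_check_caps() call.
 */
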
/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		doutc(cl, "%p %llx.%llx none pending\n", inode,
		      ceph_vinop(inode));
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		spin_unlock(&ci->i_ceph_lock);
		doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
		      ceph_vinop(inode));
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_pagecache_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
	      ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	ceph_fscache_resize(inode, to);
	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_pagecache_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, 0);

	wake_up_all(&ci->i_cap_wq);
}

static void ceph_inode_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_work);
	struct inode *inode = &ci->netfs.inode;
	struct ceph_client *cl = ceph_inode_to_client(inode);

	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
		doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
		filemap_fdatawrite(&inode->i_data);
	}
	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
		ceph_do_invalidate_pages(inode);

	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
		__ceph_do_pending_vmtruncate(inode);

	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
		ceph_check_caps(ci, 0);

	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
		ceph_flush_snaps(ci, NULL);
}

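/*
 * For reference, ceph_inode_work() is the handler wired up when the inode
 * is allocated; roughly (a sketch, the actual initialization lives in the
 * inode allocation path, not in this excerpt):
 *
 *	INIT_WORK(&ci->i_work, ceph_inode_work);
 *	ci->i_work_mask = 0;
 */
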
static const char *ceph_encrypted_get_link(struct dentry *dentry,
					   struct inode *inode,
					   struct delayed_call *done)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
				   done);
}

static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
					   const struct path *path,
					   struct kstat *stat, u32 request_mask,
					   unsigned int query_flags)
{
	int ret;

	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
	if (ret)
		return ret;
	return fscrypt_symlink_getattr(path, stat);
}

static const struct inode_operations ceph_symlink_iops = {
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
};

static const struct inode_operations ceph_encrypted_symlink_iops = {
	.get_link = ceph_encrypted_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_encrypted_symlink_getattr,
	.listxattr = ceph_listxattr,
};

/*
 * Transfer the encrypted last block to the MDS and the MDS
 * will help update it when truncating to a smaller size.
 *
 * We don't support a PAGE_SIZE that is smaller than the
 * CEPH_FSCRYPT_BLOCK_SIZE.
 */
static int fill_fscrypt_truncate(struct inode *inode,
				 struct ceph_mds_request *req,
				 struct iattr *attr)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
	loff_t pos, orig_pos = round_down(attr->ia_size,
					  CEPH_FSCRYPT_BLOCK_SIZE);
	u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
	struct ceph_pagelist *pagelist = NULL;
	struct kvec iov = {0};
	struct iov_iter iter;
	struct page *page = NULL;
	struct ceph_fscrypt_truncate_size_header header;
	int retry_op = 0;
	int len = CEPH_FSCRYPT_BLOCK_SIZE;
	loff_t i_size = i_size_read(inode);
	int got, ret, issued;
	u64 objver = 0;

	ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
	if (ret < 0)
		return ret;

	issued = __ceph_caps_issued(ci, NULL);

	doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
	      i_size, attr->ia_size, ceph_cap_string(got),
	      ceph_cap_string(issued));

	/* Try to writeback the dirty pagecaches */
	if (issued & (CEPH_CAP_FILE_BUFFER)) {
		loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SHIFT - 1;

		ret = filemap_write_and_wait_range(inode->i_mapping,
						   orig_pos, lend);
		if (ret < 0)
			goto out;
	}

	page = __page_cache_alloc(GFP_KERNEL);
	if (page == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	pagelist = ceph_pagelist_alloc(GFP_KERNEL);
	if (!pagelist) {
		ret = -ENOMEM;
		goto out;
	}

	iov.iov_base = kmap_local_page(page);
	iov.iov_len = len;
	iov_iter_kvec(&iter, READ, &iov, 1, len);

	pos = orig_pos;
	ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
	if (ret < 0)
		goto out;

	/* Insert the header first */
	header.ver = 1;
	header.compat = 1;
	header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));

	/*
	 * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
	 * because in MDS it may need this to do the truncate.
	 */
	header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);

	/*
	 * If we hit a hole here, we should just skip filling
	 * the fscrypt for the request, because once the fscrypt
	 * is enabled, the file will be split into many blocks
	 * with the size of CEPH_FSCRYPT_BLOCK_SIZE; if there
	 * is a hole, the hole size should be a multiple of the
	 * block size.
	 *
	 * If the Rados object doesn't exist, it will be set to 0.
	 */
	if (!objver) {
		doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);

		header.data_len = cpu_to_le32(8 + 8 + 4);
		header.file_offset = 0;
		ret = 0;
	} else {
		header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
		header.file_offset = cpu_to_le64(orig_pos);

		doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
		      CEPH_FSCRYPT_BLOCK_SIZE);

		/* truncate and zero out the extra contents for the last block */
		memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);

		/* encrypt the last block */
		ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
							 CEPH_FSCRYPT_BLOCK_SIZE,
							 0, block, GFP_KERNEL);
		if (ret)
			goto out;
	}

	/* Insert the header */
	ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
	if (ret)
		goto out;

	if (header.block_size) {
		/* Append the last block contents to pagelist */
		ret = ceph_pagelist_append(pagelist, iov.iov_base,
					   CEPH_FSCRYPT_BLOCK_SIZE);
		if (ret)
			goto out;
	}
	req->r_pagelist = pagelist;
out:
	doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
	      ceph_vinop(inode), ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);
	if (iov.iov_base)
		kunmap_local(iov.iov_base);
	if (page)
		__free_pages(page, 0);
	if (ret && pagelist)
		ceph_pagelist_release(pagelist);
	return ret;
}

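/*
 * A note on the header filled in above: the 8 + 8 + 4 in data_len
 * presumably accounts for the change_attr (8), file_offset (8) and
 * block_size (4) fields of struct ceph_fscrypt_truncate_size_header,
 * with one CEPH_FSCRYPT_BLOCK_SIZE payload appended only when the last
 * block actually exists (i.e. no hole was hit).
 */
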
int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
		   struct iattr *attr, struct ceph_iattr *cia)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
	struct ceph_client *cl = ceph_inode_to_client(inode);
	struct ceph_cap_flush *prealloc_cf;
	loff_t isize = i_size_read(inode);
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;
	bool fill_fscrypt;
	int truncate_retry = 20; /* The RMW will take around 50ms */
	struct dentry *dentry;
	char *path;
	int pathlen;
	u64 pathbase;
	bool do_sync = false;

	dentry = d_find_alias(inode);
	if (!dentry) {
		do_sync = true;
	} else {
		path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			do_sync = true;
			err = 0;
		} else {
			err = ceph_mds_check_access(mdsc, path, MAY_WRITE);
		}
		ceph_mdsc_free_path(path, pathlen);
		dput(dentry);

		/* For non-EACCES errors, let the MDS do the auth check */
		if (err == -EACCES) {
			return err;
		} else if (err < 0) {
			do_sync = true;
			err = 0;
		}
	}

retry:
	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	fill_fscrypt = false;
	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
	      ceph_cap_string(issued));
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	if (cia && cia->fscrypt_auth) {
		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);

		if (len > sizeof(*cia->fscrypt_auth)) {
			err = -EINVAL;
			spin_unlock(&ci->i_ceph_lock);
			goto out;
		}

		doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
		      ceph_vinop(inode), ci->fscrypt_auth_len, len);

		/* It should never be re-set once set */
		WARN_ON_ONCE(ci->fscrypt_auth);

		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
			dirtied |= CEPH_CAP_AUTH_EXCL;
			kfree(ci->fscrypt_auth);
			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
			ci->fscrypt_auth_len = len;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   ci->fscrypt_auth_len != len ||
			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
			req->r_fscrypt_auth = cia->fscrypt_auth;
			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
			release |= CEPH_CAP_AUTH_SHARED;
		}
		cia->fscrypt_auth = NULL;
	}
#else
	if (cia && cia->fscrypt_auth) {
		err = -EINVAL;
		spin_unlock(&ci->i_ceph_lock);
		goto out;
	}
#endif /* CONFIG_FS_ENCRYPTION */

	if (ia_valid & ATTR_UID) {
		kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);

		doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
		      ceph_vinop(inode),
		      from_kuid(&init_user_ns, inode->i_uid),
		      from_kuid(&init_user_ns, attr->ia_uid));
		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
			inode->i_uid = fsuid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(fsuid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, fsuid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);

		doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
		      ceph_vinop(inode),
		      from_kgid(&init_user_ns, inode->i_gid),
		      from_kgid(&init_user_ns, attr->ia_gid));
		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
			inode->i_gid = fsgid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(fsgid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, fsgid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
		      ceph_vinop(inode), inode->i_mode, attr->ia_mode);
		if (!do_sync && (issued & CEPH_CAP_AUTH_EXCL)) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		struct timespec64 atime = inode_get_atime(inode);

		doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
		      inode, ceph_vinop(inode),
		      atime.tv_sec, atime.tv_nsec,
		      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
			ci->i_time_warp_seq++;
			inode_set_atime_to_ts(inode, attr->ia_atime);
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
			   timespec64_compare(&atime,
					      &attr->ia_atime) < 0) {
			inode_set_atime_to_ts(inode, attr->ia_atime);
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec64_equal(&atime, &attr->ia_atime)) {
			ceph_encode_timespec64(&req->r_args.setattr.atime,
					       &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_SHARED |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
		}
	}

	if (ia_valid & ATTR_SIZE) {
		doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
		      ceph_vinop(inode), isize, attr->ia_size);
		/*
		 * Only when the new size is smaller and not aligned to
		 * CEPH_FSCRYPT_BLOCK_SIZE is the RMW needed.
		 */
		if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
		    (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
			set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
			mask |= CEPH_SETATTR_FSCRYPT_FILE;
			req->r_args.setattr.size =
				cpu_to_le64(round_up(attr->ia_size,
						     CEPH_FSCRYPT_BLOCK_SIZE));
			req->r_args.setattr.old_size =
				cpu_to_le64(round_up(isize,
						     CEPH_FSCRYPT_BLOCK_SIZE));
			req->r_fscrypt_file = attr->ia_size;
			fill_fscrypt = true;
		} else if (!do_sync && (issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
			if (attr->ia_size > isize) {
				i_size_write(inode, attr->ia_size);
				inode->i_blocks = calc_inode_blocks(attr->ia_size);
				ci->i_reported_size = attr->ia_size;
				dirtied |= CEPH_CAP_FILE_EXCL;
				ia_valid |= ATTR_MTIME;
			}
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != isize) {
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
			if (IS_ENCRYPTED(inode) && attr->ia_size) {
				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
				mask |= CEPH_SETATTR_FSCRYPT_FILE;
				req->r_args.setattr.size =
					cpu_to_le64(round_up(attr->ia_size,
							     CEPH_FSCRYPT_BLOCK_SIZE));
				req->r_args.setattr.old_size =
					cpu_to_le64(round_up(isize,
							     CEPH_FSCRYPT_BLOCK_SIZE));
				req->r_fscrypt_file = attr->ia_size;
			} else {
				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
				req->r_args.setattr.old_size = cpu_to_le64(isize);
				req->r_fscrypt_file = 0;
			}
		}
	}

	if (ia_valid & ATTR_MTIME) {
		struct timespec64 mtime = inode_get_mtime(inode);

		doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
		      inode, ceph_vinop(inode),
		      mtime.tv_sec, mtime.tv_nsec,
		      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) {
			ci->i_time_warp_seq++;
			inode_set_mtime_to_ts(inode, attr->ia_mtime);
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if (!do_sync && (issued & CEPH_CAP_FILE_WR) &&
			   timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
			inode_set_mtime_to_ts(inode, attr->ia_mtime);
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec64_equal(&mtime, &attr->ia_mtime)) {
			ceph_encode_timespec64(&req->r_args.setattr.mtime,
					       &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
		      inode, ceph_vinop(inode),
		      inode_get_ctime_sec(inode),
		      inode_get_ctime_nsec(inode),
		      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		      only ? "ctime only" : "ignored");

		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}

	if (ia_valid & ATTR_FILE)
		doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
		      ceph_vinop(inode));

	inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
						   &prealloc_cf);
	inode_set_ctime_to_ts(inode, attr->ia_ctime);
	inode_inc_iversion_raw(inode);

	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem) {
		up_read(&mdsc->snap_rwsem);
		lock_snap_rwsem = false;
	}

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	req->r_inode = inode;
	req->r_inode_drop = release;
	req->r_args.setattr.mask = cpu_to_le32(mask);
	req->r_num_caps = 1;
	req->r_stamp = attr->ia_ctime;
	if (fill_fscrypt) {
		err = fill_fscrypt_truncate(inode, req, attr);
		if (err)
			goto out;
	}

	/*
	 * The truncate request will return -EAGAIN when the
	 * last block has been updated just before the MDS
	 * successfully gets the xlock for the FILE lock. To
	 * avoid corrupting the file contents we need to retry
	 * it.
	 */
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == -EAGAIN && truncate_retry--) {
		doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
		      inode, ceph_vinop(inode), err,
		      ceph_cap_string(dirtied), mask);
		ceph_mdsc_put_request(req);
		ceph_free_cap_flush(prealloc_cf);
		goto retry;
	}
out:
	doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
	      ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);

	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
		__ceph_do_pending_vmtruncate(inode);

	return err;
}

int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
	int err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
		return -EFBIG;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
		return -EDQUOT;

	err = __ceph_setattr(idmap, inode, attr, NULL);

	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
		err = posix_acl_chmod(idmap, dentry, attr->ia_mode);

	return err;
}

int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
{
	int issued = ceph_caps_issued(ceph_inode(inode));

	/*
	 * If any 'x' caps are issued we can just choose the auth MDS
	 * instead of a random replica MDS. Only when the Locker is in
	 * the LOCK_EXEC state can the loner client hold 'x' caps, and
	 * if we send a getattr request to a replica MDS it must auth-pin
	 * and try to rdlock from the auth MDS, which then has to do a
	 * Locker state transition to LOCK_SYNC, and after that the lock
	 * state will change back.
	 *
	 * Doing the Locker state transition is costly and usually
	 * requires revoking caps from clients.
	 *
	 * For the 'Xs' caps needed by getxattr we also choose the auth
	 * MDS, because the MDS side code is buggy: setxattr doesn't
	 * notify the replica MDSes when the values change, so a replica
	 * MDS would return stale values. Though this will be fixed in
	 * the MDS code, it still makes sense for old ceph.
	 */
	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
		return USE_AUTH_MDS;
	else
		return USE_ANY_MDS;
}

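/*
 * Worked example of the policy above: a getattr that wants CEPH_STAT_CAP_XATTR
 * (or any rstat field) always goes to the auth MDS, and so does one that asks
 * for a shared cap while this client already holds the corresponding exclusive
 * cap; everything else may be served by any replica MDS.
 */
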
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
	struct ceph_client *cl = fsc->client;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int mode;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
		      ceph_vinop(inode));
		return 0;
	}

	doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
	      ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
		return 0;

	mode = ceph_try_to_choose_auth_mds(inode, mask);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE ||
			   inline_version == 1) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	doutc(cl, "result=%d\n", err);
	return err;
}

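/*
 * ceph_do_getattr(), used by ceph_permission() and ceph_getattr() below, is
 * assumed to be a thin wrapper around __ceph_do_getattr() with no locked
 * page; a sketch of that assumption (the real definition lives in a header,
 * not in this file):
 *
 *	static inline int example_do_getattr(struct inode *inode, int mask,
 *					     bool force)
 *	{
 *		return __ceph_do_getattr(inode, NULL, mask, force);
 *	}
 */
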
int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
		      size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
	struct ceph_client *cl = fsc->client;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int mode = USE_AUTH_MDS;
	int err;
	char *xattr_value;
	size_t xattr_value_len;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
	if (IS_ERR(req)) {
		err = -ENOMEM;
		goto out;
	}

	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		err = -ENOMEM;
		goto put;
	}

	req->r_inode = inode;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto put;

	xattr_value = req->r_reply_info.xattr_info.xattr_value;
	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;

	doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);

	err = (int)xattr_value_len;
	if (size == 0)
		goto put;

	if (xattr_value_len > size) {
		err = -ERANGE;
		goto put;
	}

	memcpy(value, xattr_value, xattr_value_len);
put:
	ceph_mdsc_put_request(req);
out:
	doutc(cl, "result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
		    int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
	if (!err)
		err = generic_permission(idmap, inode, mask);
	return err;
}

/* Craft a mask of needed caps given a set of requested statx attrs. */
static int statx_to_caps(u32 want, umode_t mode)
{
	int mask = 0;

	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
		mask |= CEPH_CAP_AUTH_SHARED;

	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
		/*
		 * The link count for directories depends on inode->i_subdirs,
		 * and that is only updated when Fs caps are held.
		 */
		if (S_ISDIR(mode))
			mask |= CEPH_CAP_FILE_SHARED;
		else
			mask |= CEPH_CAP_LINK_SHARED;
	}

	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
		mask |= CEPH_CAP_FILE_SHARED;

	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
		mask |= CEPH_CAP_XATTR_SHARED;

	return mask;
}

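/*
 * For example, a statx() that requests only STATX_SIZE maps to
 * CEPH_CAP_FILE_SHARED above, while STATX_CTIME pulls in the AUTH, FILE,
 * LINK (or FILE for directories) and XATTR shared caps, since ctime can be
 * changed by chown/chmod, writes, link count changes and xattr updates.
 */
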
/*
 * Get all the attributes. If we have sufficient caps for the requested attrs,
 * then we can avoid talking to the MDS at all.
 */
int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct super_block *sb = inode->i_sb;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 valid_mask = STATX_BASIC_STATS;
	int err = 0;

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	/* Skip the getattr altogether if we're asked not to sync */
	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
		err = ceph_do_getattr(inode,
				      statx_to_caps(request_mask, inode->i_mode),
				      flags & AT_STATX_FORCE_SYNC);
		if (err)
			return err;
	}

	generic_fillattr(idmap, request_mask, inode, stat);
	stat->ino = ceph_present_inode(inode);

	/*
	 * btime on newly-allocated inodes is 0, so if this is still set to
	 * that, then assume that it's not valid.
	 */
	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
		stat->btime = ci->i_btime;
		valid_mask |= STATX_BTIME;
	}

	if (request_mask & STATX_CHANGE_COOKIE) {
		stat->change_cookie = inode_peek_iversion_raw(inode);
		valid_mask |= STATX_CHANGE_COOKIE;
	}

	if (ceph_snap(inode) == CEPH_NOSNAP)
		stat->dev = sb->s_dev;
	else
		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;

	if (S_ISDIR(inode->i_mode)) {
		if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
			stat->size = ci->i_rbytes;
		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
			struct ceph_inode_info *pci;
			struct ceph_snap_realm *realm;
			struct inode *parent;

			parent = ceph_lookup_inode(sb, ceph_ino(inode));
			if (IS_ERR(parent))
				return PTR_ERR(parent);

			pci = ceph_inode(parent);
			spin_lock(&pci->i_ceph_lock);
			realm = pci->i_snap_realm;
			if (realm)
				stat->size = realm->num_snaps;
			else
				stat->size = 0;
			spin_unlock(&pci->i_ceph_lock);
			iput(parent);
		} else {
			stat->size = ci->i_files + ci->i_subdirs;
		}
		stat->blksize = 65536;
		/*
		 * Some applications rely on the number of st_nlink
		 * value on directories to be either 0 (if unlinked)
		 * or 2 + number of subdirectories.
		 */
		if (stat->nlink == 1)
			/* '.' + '..' + subdirs */
			stat->nlink = 1 + 1 + ci->i_subdirs;
	}

	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
				  STATX_ATTR_ENCRYPTED);

	stat->result_mask = request_mask & valid_mask;
	return err;
}

void ceph_inode_shutdown(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;
	int iputs = 0;
	bool invalidate = false;

	spin_lock(&ci->i_ceph_lock);
	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);

		p = rb_next(p);
		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate)
		ceph_queue_invalidate(inode);
	while (iputs--)
		iput(inode);
}