// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is a MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	struct ceph_pagelist *pagelist;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	if (features & CEPH_FEATURE_MDS_QUOTA) {
		u8 struct_v, struct_compat;
		u32 struct_len;

		/*
		 * both struct_v and struct_compat are expected to be >= 1
		 */
		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		if (!struct_v || !struct_compat)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		ceph_decode_64_safe(p, end, info->max_bytes, bad);
		ceph_decode_64_safe(p, end, info->max_files, bad);
	} else {
		info->max_bytes = 0;
		info->max_files = 0;
	}

	info->pool_ns_len = 0;
	info->pool_ns_data = NULL;
	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}
	}

	return 0;
bad:
	return err;
}
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		rde->name_len = ceph_decode_32(p);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
		rde->lease = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
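
/*
 * Free the readdir entry buffer, if one was allocated by
 * ceph_alloc_readdir_reply_buffer() for this reply.
 */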
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}
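
/*
 * Session reference counting: get_session() only takes a reference if
 * s_ref is still non-zero, so it is safe against a racing final put.
 */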
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL\n", s);
		return NULL;
	}
}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     refcount_read(&session->s_ref));
	get_session(session);
	return session;
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent)
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * inode we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("__choose_mds using snapdir's parent %p\n", inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = req->r_dentry->d_parent;
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
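
/*
 * Encode the feature bits this client supports as a length-prefixed
 * bitset, in the wire format the MDS expects in the session open message.
 */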
static void encode_supported_features(void **p, void *end)
{
	static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
	static const size_t count = ARRAY_SIZE(bits);

	if (count > 0) {
		size_t i;
		size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;

		BUG_ON(*p + 4 + size > end);
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
		*p += size;
	} else {
		BUG_ON(*p + 4 > end);
		ceph_encode_32(p, 0);
	}
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p, *end;

	const char* metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}
	/* supported feature */
	extra_bytes += 4 + 8;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(3);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	encode_supported_features(&p, end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}
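
/*
 * Drop all requests still waiting for an unsafe reply from this session,
 * and clear r_attempts on its remaining requests so they get resent.
 */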
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
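
/*
 * iterate_session_caps() callback: remove one cap, discarding any dirty
 * or flushing state if this was the inode's last cap.
 */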
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool drop = false;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;

		if (ci->i_wrbuffer_ref > 0 &&
		    READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			invalidate = true;

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (atomic_read(&ci->i_filelock_ref) > 0) {
			/* make further file lock syscall return -EIO */
			ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
			pr_warn_ratelimited(" dropping file locks for %p %lld\n",
					    inode, ceph_ino(inode));
		}

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}

		if (drop &&
		    ci->i_wrbuffer_ref_head == 0 &&
		    ci->i_wr_ref == 0 &&
		    ci->i_dirty_caps == 0 &&
		    ci->i_flushing_caps == 0) {
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (drop)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
		ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping auth caps. It doesn't hurt because reply
		 * of lock mds request will re-add auth caps. */
		if (atomic_read(&ci->i_filelock_ref) > 0)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
		session->s_trim_caps--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				session->s_trim_caps--;
			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
			     inode, cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}
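
/*
 * Return nonzero once every cap flush up to @want_flush_tid has been
 * acked; this is the wait_event() condition used by wait_caps_flush().
 */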
static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;
	__le32 barrier, *cap_barrier;

	down_read(&osdc->lock);
	barrier = cpu_to_le32(osdc->epoch_barrier);
	up_read(&osdc->lock);

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);

			msg->hdr.version = cpu_to_le16(2);
			msg->hdr.compat_version = cpu_to_le16(1);
		}

		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			// Append cap_barrier field
			cap_barrier = msg->front.iov_base + msg->front.iov_len;
			*cap_barrier = barrier;
			msg->front.iov_len += sizeof(*cap_barrier);

			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		// Append cap_barrier field
		cap_barrier = msg->front.iov_base + msg->front.iov_len;
		*cap_barrier = barrier;
		msg->front.iov_len += sizeof(*cap_barrier);

		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
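
/*
 * Size the readdir reply buffer from the directory's file/subdir counts,
 * clamped by the max_readdir mount option; fall back to smaller
 * allocation orders under memory pressure.
 */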
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
	struct timespec64 ts;

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	ktime_get_coarse_real_ts64(&ts);
	req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (!dentry)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (!path)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
/* Duplicate the dentry->d_name.name safely */
static int clone_dentry_name(struct dentry *dentry, const char **ppath,
			     int *ppathlen)
{
	u32 len;
	char *name;

	len = READ_ONCE(dentry->d_name.len);
	name = kmalloc(len + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	spin_lock(&dentry->d_lock);
	if (dentry->d_name.len != len) {
		spin_unlock(&dentry->d_lock);
		kfree(name);
		return -EAGAIN;
	}
	memcpy(name, dentry->d_name.name, len);
	spin_unlock(&dentry->d_lock);

	name[len] = '\0';
	*ppath = name;
	*ppathlen = len;
	return 0;
}
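
/*
 * Build a path the MDS understands for this dentry: parent ino + dentry
 * name when the parent is not snapped, otherwise a full path from
 * ceph_mdsc_build_path().
 */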
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
			     const char **ppath, int *ppathlen, u64 *pino,
			     bool *pfreepath, bool parent_locked)
{
	int ret;
	char *path;

	rcu_read_lock();
	if (!dir)
		dir = d_inode_rcu(dentry->d_parent);
	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
		*pino = ceph_ino(dir);
		rcu_read_unlock();
		if (parent_locked) {
			*ppath = dentry->d_name.name;
			*ppathlen = dentry->d_name.len;
		} else {
			ret = clone_dentry_name(dentry, ppath, ppathlen);
			if (ret)
				return ret;
			*pfreepath = true;
		}
		return 0;
	}
	rcu_read_unlock();
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    bool *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 struct inode *rdiri, const char *rpath,
				 u64 rino, const char **ppath, int *pathlen,
				 u64 *ino, bool *freepath, bool parent_locked)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
				      freepath, parent_locked);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	bool freepath1 = false, freepath2 = false;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
			      req->r_parent, req->r_path1, req->r_ino1.ino,
			      &path1, &pathlen1, &ino1, &freepath1,
			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
					&req->r_req_flags));
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	/* If r_old_dentry is set, then assume that its parent is locked */
	ret = set_request_path_attr(NULL, req->r_old_dentry,
			      req->r_old_dentry_dir,
			      req->r_path2, req->r_ino2.ino,
			      &path2, &pathlen2, &ino2, &freepath2, true);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
				req->r_parent, mds, req->r_dentry_drop,
				req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
				req->r_old_dentry_dir, mds,
				req->r_old_dentry_drop,
				req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec64(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		refcount_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec64(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_parent)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_parent = %p\n", req->r_parent);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static void __do_request(struct ceph_mds_client *mdsc,
			 struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		if (mdsc->mdsmap->m_epoch == 0) {
			dout("do_request no mdsmap, waiting for map\n");
			list_add(&req->r_wait, &mdsc->waiting_for_map);
			goto finish;
		}
		if (!(mdsc->fsc->mount_options->flags &
		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
			err = -EHOSTUNREACH;
			goto finish;
		}
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
			err = -EACCES;
			goto out_session;
		}
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head)
{
        struct ceph_mds_request *req;
        LIST_HEAD(tmp_list);

        list_splice_init(head, &tmp_list);

        while (!list_empty(&tmp_list)) {
                req = list_entry(tmp_list.next,
                                 struct ceph_mds_request, r_wait);
                list_del_init(&req->r_wait);
                dout(" wake request %p tid %llu\n", req, req->r_tid);
                __do_request(mdsc, req);
        }
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
        struct ceph_mds_request *req;
        struct rb_node *p = rb_first(&mdsc->request_tree);

        dout("kick_requests mds%d\n", mds);
        while (p) {
                req = rb_entry(p, struct ceph_mds_request, r_node);
                p = rb_next(p);
                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
                        continue;
                if (req->r_attempts > 0)
                        continue; /* only new requests */
                if (req->r_session &&
                    req->r_session->s_mds == mds) {
                        dout(" kicking tid %llu\n", req->r_tid);
                        list_del_init(&req->r_wait);
                        __do_request(mdsc, req);
                }
        }
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
                              struct ceph_mds_request *req)
{
        dout("submit_request on %p\n", req);
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, NULL);
        __do_request(mdsc, req);
        mutex_unlock(&mdsc->mutex);
}
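
/*
 * Illustrative note (a sketch, not code from this file): callers that
 * want asynchronous completion set req->r_callback before submitting;
 * complete_request() then invokes the callback instead of waking a
 * sleeper on r_completion.  Assuming a hypothetical callback
 * my_done_cb(), an async caller would look roughly like:
 *
 *      req->r_callback = my_done_cb;      (hypothetical)
 *      ceph_mdsc_submit_request(mdsc, req);
 *      ... my_done_cb(mdsc, req) runs when the reply (or abort) arrives.
 */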
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                         struct inode *dir,
                         struct ceph_mds_request *req)
{
        int err;

        dout("do_request on %p\n", req);

        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
        if (req->r_inode)
                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
        if (req->r_parent)
                ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
        if (req->r_old_dentry_dir)
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);

        /* issue */
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, dir);
        __do_request(mdsc, req);

        if (req->r_err) {
                err = req->r_err;
                goto out;
        }

        /* wait */
        mutex_unlock(&mdsc->mutex);
        dout("do_request waiting\n");
        if (!req->r_timeout && req->r_wait_for_completion) {
                err = req->r_wait_for_completion(mdsc, req);
        } else {
                long timeleft = wait_for_completion_killable_timeout(
                                        &req->r_completion,
                                        ceph_timeout_jiffies(req->r_timeout));
                if (timeleft > 0)
                        err = 0;
                else if (!timeleft)
                        err = -EIO;  /* timed out */
                else
                        err = timeleft;  /* killed */
        }
        dout("do_request waited, got %d\n", err);
        mutex_lock(&mdsc->mutex);

        /* only abort if we didn't race with a real reply */
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = le32_to_cpu(req->r_reply_info.head->result);
        } else if (err < 0) {
                dout("aborted request %lld with %d\n", req->r_tid, err);

                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (req->r_parent &&
                    (req->r_op & CEPH_MDS_OP_WRITE))
                        ceph_invalidate_dir_request(req);
        } else {
                err = req->r_err;
        }

out:
        mutex_unlock(&mdsc->mutex);
        dout("do_request %p done, result %d\n", req, err);
        return err;
}
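
/*
 * Illustrative usage (a minimal sketch; the surrounding caller, the
 * "dir" inode, and error handling are assumed rather than taken from
 * this file -- ceph_mdsc_create_request() and the op/mode constants
 * are the real API):
 *
 *      struct ceph_mds_request *req;
 *      int err;
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *                                     USE_ANY_MDS);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->r_parent = dir;
 *      err = ceph_mdsc_do_request(mdsc, dir, req);
 *      ceph_mdsc_put_request(req);     always drop the request ref
 */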
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
        struct inode *dir = req->r_parent;
        struct inode *old_dir = req->r_old_dentry_dir;

        dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);

        ceph_dir_clear_complete(dir);
        if (old_dir)
                ceph_dir_clear_complete(old_dir);
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
        if (req->r_old_dentry)
                ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct ceph_mds_request *req;
        struct ceph_mds_reply_head *head = msg->front.iov_base;
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
        struct ceph_snap_realm *realm;
        u64 tid;
        int err, result;
        int mds = session->s_mds;

        if (msg->front.iov_len < sizeof(*head)) {
                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
                ceph_msg_dump(msg);
                return;
        }

        /* get request, session */
        tid = le64_to_cpu(msg->hdr.tid);
        mutex_lock(&mdsc->mutex);
        req = lookup_get_request(mdsc, tid);
        if (!req) {
                dout("handle_reply on unknown tid %llu\n", tid);
                mutex_unlock(&mdsc->mutex);
                return;
        }
        dout("handle_reply %p\n", req);

        /* correct session? */
        if (req->r_session != session) {
                pr_err("mdsc_handle_reply got %llu on session mds%d"
                       " not mds%d\n", tid, session->s_mds,
                       req->r_session ? req->r_session->s_mds : -1);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }

        /* dup? */
        if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
            (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
                pr_warn("got a dup %s reply on %llu from mds%d\n",
                        head->safe ? "safe" : "unsafe", tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
        if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
                pr_warn("got unsafe after safe on %llu from mds%d\n",
                        tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }

        result = le32_to_cpu(head->result);

        /*
         * Handle an ESTALE
         * if we're not talking to the authority, send to them
         * if the authority has changed while we weren't looking,
         * send to new authority
         * Otherwise we just have to return an ESTALE
         */
        if (result == -ESTALE) {
                dout("got ESTALE on request %llu\n", req->r_tid);
                req->r_resend_mds = -1;
                if (req->r_direct_mode != USE_AUTH_MDS) {
                        dout("not using auth, setting for that now\n");
                        req->r_direct_mode = USE_AUTH_MDS;
                        __do_request(mdsc, req);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                } else {
                        int mds = __choose_mds(mdsc, req);
                        if (mds >= 0 && mds != req->r_session->s_mds) {
                                dout("but auth changed, so resending\n");
                                __do_request(mdsc, req);
                                mutex_unlock(&mdsc->mutex);
                                goto out;
                        }
                }
                dout("have to return ESTALE on request %llu\n", req->r_tid);
        }

        if (head->safe) {
                set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
                __unregister_request(mdsc, req);

                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                        /*
                         * We already handled the unsafe response, now do the
                         * cleanup.  No need to examine the response; the MDS
                         * doesn't include any result info in the safe
                         * response.  And even if it did, there is nothing
                         * useful we could do with a revised return value.
                         */
                        dout("got safe reply %llu, mds%d\n", tid, mds);

                        /* last unsafe request during umount? */
                        if (mdsc->stopping && !__get_oldest_req(mdsc))
                                complete_all(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
        } else {
                set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
                if (req->r_unsafe_dir) {
                        struct ceph_inode_info *ci =
                                        ceph_inode(req->r_unsafe_dir);
                        spin_lock(&ci->i_unsafe_lock);
                        list_add_tail(&req->r_unsafe_dir_item,
                                      &ci->i_unsafe_dirops);
                        spin_unlock(&ci->i_unsafe_lock);
                }
        }

        dout("handle_reply tid %lld result %d\n", tid, result);
        rinfo = &req->r_reply_info;
        err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
        mutex_unlock(&mdsc->mutex);

        mutex_lock(&session->s_mutex);
        if (err < 0) {
                pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
                ceph_msg_dump(msg);
                goto out_err;
        }

        /* snap trace */
        realm = NULL;
        if (rinfo->snapblob_len) {
                down_write(&mdsc->snap_rwsem);
                ceph_update_snap_trace(mdsc, rinfo->snapblob,
                                rinfo->snapblob + rinfo->snapblob_len,
                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
                                &realm);
                downgrade_write(&mdsc->snap_rwsem);
        } else {
                down_read(&mdsc->snap_rwsem);
        }

        /* insert trace into our cache */
        mutex_lock(&req->r_fill_mutex);
        current->journal_info = req;
        err = ceph_fill_trace(mdsc->fsc->sb, req);
        if (err == 0) {
                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
                                    req->r_op == CEPH_MDS_OP_LSSNAP))
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
        }
        current->journal_info = NULL;
        mutex_unlock(&req->r_fill_mutex);

        up_read(&mdsc->snap_rwsem);
        if (realm)
                ceph_put_snap_realm(mdsc, realm);

        if (err == 0 && req->r_target_inode &&
            test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
                spin_lock(&ci->i_unsafe_lock);
                list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
                spin_unlock(&ci->i_unsafe_lock);
        }
out_err:
        mutex_lock(&mdsc->mutex);
        if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
                if (err) {
                        req->r_err = err;
                } else {
                        req->r_reply = ceph_msg_get(msg);
                        set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
                }
        } else {
                dout("reply arrived after request %lld was aborted\n", tid);
        }
        mutex_unlock(&mdsc->mutex);

        mutex_unlock(&session->s_mutex);

        /* kick calling process */
        complete_request(mdsc, req);
out:
        ceph_mdsc_put_request(req);
        return;
}
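
/*
 * Note on the unsafe/safe sequence handled above (a summary of the
 * existing behavior, not new logic): for a mutating op the MDS may
 * first send an "unsafe" reply as soon as the change is applied in
 * memory, and later a "safe" reply once it has been journaled.  A
 * typical trace for one tid:
 *
 *      -> request (tid 123)
 *      <- unsafe reply (tid 123)  result applied, req kept on s_unsafe
 *      <- safe reply   (tid 123)  req unregistered, waiters completed
 */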
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session,
                           struct ceph_msg *msg)
{
        struct ceph_mds_request *req;
        u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 next_mds;
        u32 fwd_seq;
        int err = -EINVAL;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;

        ceph_decode_need(&p, end, 2*sizeof(u32), bad);
        next_mds = ceph_decode_32(&p);
        fwd_seq = ceph_decode_32(&p);

        mutex_lock(&mdsc->mutex);
        req = lookup_get_request(mdsc, tid);
        if (!req) {
                dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
                goto out;  /* dup reply? */
        }

        if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
                dout("forward tid %llu aborted, unregistering\n", tid);
                __unregister_request(mdsc, req);
        } else if (fwd_seq <= req->r_num_fwd) {
                dout("forward tid %llu to mds%d - old seq %d <= %d\n",
                     tid, next_mds, req->r_num_fwd, fwd_seq);
        } else {
                /* resend. forward race not possible; mds would drop */
                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
                BUG_ON(req->r_err);
                BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
                req->r_attempts = 0;
                req->r_num_fwd = fwd_seq;
                req->r_resend_mds = next_mds;
                put_request_session(req);
                __do_request(mdsc, req);
        }
        ceph_mdsc_put_request(req);
out:
        mutex_unlock(&mdsc->mutex);
        return;

bad:
        pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
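
/*
 * Example of the fwd_seq check above (illustrative numbers): if mds0
 * forwards tid 42 to mds1 with fwd_seq 1 and we have already resent
 * to mds1 (r_num_fwd == 1), a duplicate or stale forward notification
 * with fwd_seq <= 1 is ignored rather than triggering another resend.
 */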
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
                           struct ceph_msg *msg)
{
        struct ceph_mds_client *mdsc = session->s_mdsc;
        u32 op;
        u64 seq;
        int mds = session->s_mds;
        struct ceph_mds_session_head *h = msg->front.iov_base;
        int wake = 0;

        /* decode */
        if (msg->front.iov_len < sizeof(*h))
                goto bad;
        op = le32_to_cpu(h->op);
        seq = le64_to_cpu(h->seq);

        mutex_lock(&mdsc->mutex);
        if (op == CEPH_SESSION_CLOSE) {
                get_session(session);
                __unregister_session(mdsc, session);
        }
        /* FIXME: this ttl calculation is generous */
        session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
        mutex_unlock(&mdsc->mutex);

        mutex_lock(&session->s_mutex);

        dout("handle_session mds%d %s %p state %s seq %llu\n",
             mds, ceph_session_op_name(op), session,
             ceph_session_state_name(session->s_state), seq);

        if (session->s_state == CEPH_MDS_SESSION_HUNG) {
                session->s_state = CEPH_MDS_SESSION_OPEN;
                pr_info("mds%d came back\n", session->s_mds);
        }

        switch (op) {
        case CEPH_SESSION_OPEN:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
                        pr_info("mds%d reconnect success\n", session->s_mds);
                session->s_state = CEPH_MDS_SESSION_OPEN;
                renewed_caps(mdsc, session, 0);
                wake = 1;
                if (mdsc->stopping)
                        __close_session(mdsc, session);
                break;

        case CEPH_SESSION_RENEWCAPS:
                if (session->s_renew_seq == seq)
                        renewed_caps(mdsc, session, 1);
                break;

        case CEPH_SESSION_CLOSE:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
                        pr_info("mds%d reconnect denied\n", session->s_mds);
                cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
                wake = 2; /* for good measure */
                wake_up_all(&mdsc->session_close_wq);
                break;

        case CEPH_SESSION_STALE:
                pr_info("mds%d caps went stale, renewing\n",
                        session->s_mds);
                spin_lock(&session->s_gen_ttl_lock);
                session->s_cap_gen++;
                session->s_cap_ttl = jiffies - 1;
                spin_unlock(&session->s_gen_ttl_lock);
                send_renew_caps(mdsc, session);
                break;

        case CEPH_SESSION_RECALL_STATE:
                ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
                break;

        case CEPH_SESSION_FLUSHMSG:
                send_flushmsg_ack(mdsc, session, seq);
                break;

        case CEPH_SESSION_FORCE_RO:
                dout("force_session_readonly %p\n", session);
                spin_lock(&session->s_cap_lock);
                session->s_readonly = true;
                spin_unlock(&session->s_cap_lock);
                wake_up_session_caps(session, 0);
                break;

        case CEPH_SESSION_REJECT:
                WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
                pr_info("mds%d rejected session\n", session->s_mds);
                session->s_state = CEPH_MDS_SESSION_REJECTED;
                cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
                wake = 2; /* for good measure */
                break;

        default:
                pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
                WARN_ON(1);
        }

        mutex_unlock(&session->s_mutex);
        if (wake) {
                mutex_lock(&mdsc->mutex);
                __wake_requests(mdsc, &session->s_waiting);
                if (wake == 2)
                        kick_requests(mdsc, mds);
                mutex_unlock(&mdsc->mutex);
        }
        if (op == CEPH_SESSION_CLOSE)
                ceph_put_mds_session(session);
        return;

bad:
        pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
               (int)msg->front.iov_len);
        ceph_msg_dump(msg);
        return;
}
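
/*
 * Session state transitions driven by the handler above (summary of
 * the code, shown for reference):
 *
 *      OPENING      -- CEPH_SESSION_OPEN   --> OPEN
 *      RECONNECTING -- CEPH_SESSION_OPEN   --> OPEN  ("reconnect success")
 *      OPENING      -- CEPH_SESSION_REJECT --> REJECTED
 *      any          -- CEPH_SESSION_CLOSE  --> unregistered
 *      HUNG         -- any message         --> OPEN  ("came back")
 */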
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
{
        struct ceph_mds_request *req, *nreq;
        struct rb_node *p;
        int err;

        dout("replay_unsafe_requests mds%d\n", session->s_mds);

        mutex_lock(&mdsc->mutex);
        list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
                err = __prepare_send_request(mdsc, req, session->s_mds, true);
                if (!err) {
                        ceph_msg_get(req->r_request);
                        ceph_con_send(&session->s_con, req->r_request);
                }
        }

        /*
         * Also re-send old requests when the MDS enters the reconnect
         * stage, so that it can process completed requests during its
         * clientreplay stage.
         */
        p = rb_first(&mdsc->request_tree);
        while (p) {
                req = rb_entry(p, struct ceph_mds_request, r_node);
                p = rb_next(p);
                if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
                        continue;
                if (req->r_attempts == 0)
                        continue; /* only old requests */
                if (req->r_session &&
                    req->r_session->s_mds == session->s_mds) {
                        err = __prepare_send_request(mdsc, req,
                                                     session->s_mds, true);
                        if (!err) {
                                ceph_msg_get(req->r_request);
                                ceph_con_send(&session->s_con, req->r_request);
                        }
                }
        }
        mutex_unlock(&mdsc->mutex);
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                          void *arg)
{
        union {
                struct ceph_mds_cap_reconnect v2;
                struct ceph_mds_cap_reconnect_v1 v1;
        } rec;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_reconnect_state *recon_state = arg;
        struct ceph_pagelist *pagelist = recon_state->pagelist;
        char *path;
        int pathlen, err;
        u64 pathbase;
        u64 snap_follows;
        struct dentry *dentry;

        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
             inode, ceph_vinop(inode), cap, cap->cap_id,
             ceph_cap_string(cap->issued));
        err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
        if (err)
                return err;

        dentry = d_find_alias(inode);
        if (dentry) {
                path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out_dput;
                }
        } else {
                path = NULL;
                pathlen = 0;
                pathbase = 0;
        }

        spin_lock(&ci->i_ceph_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
        cap->mseq = 0;       /* and migrate_seq */
        cap->cap_gen = cap->session->s_cap_gen;

        if (recon_state->msg_version >= 2) {
                rec.v2.cap_id = cpu_to_le64(cap->cap_id);
                rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
                rec.v2.issued = cpu_to_le32(cap->issued);
                rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
                rec.v2.pathbase = cpu_to_le64(pathbase);
                rec.v2.flock_len = (__force __le32)
                        ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
        } else {
                rec.v1.cap_id = cpu_to_le64(cap->cap_id);
                rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
                rec.v1.issued = cpu_to_le32(cap->issued);
                rec.v1.size = cpu_to_le64(inode->i_size);
                ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
                ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
                rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
                rec.v1.pathbase = cpu_to_le64(pathbase);
        }

        if (list_empty(&ci->i_cap_snaps)) {
                snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
        } else {
                struct ceph_cap_snap *capsnap =
                        list_first_entry(&ci->i_cap_snaps,
                                         struct ceph_cap_snap, ci_item);
                snap_follows = capsnap->follows;
        }
        spin_unlock(&ci->i_ceph_lock);

        if (recon_state->msg_version >= 2) {
                int num_fcntl_locks, num_flock_locks;
                struct ceph_filelock *flocks = NULL;
                size_t struct_len, total_len = 0;
                u8 struct_v = 0;

encode_again:
                if (rec.v2.flock_len) {
                        ceph_count_locks(inode, &num_fcntl_locks,
                                         &num_flock_locks);
                } else {
                        num_fcntl_locks = 0;
                        num_flock_locks = 0;
                }
                if (num_fcntl_locks + num_flock_locks > 0) {
                        flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
                                               sizeof(struct ceph_filelock),
                                               GFP_NOFS);
                        if (!flocks) {
                                err = -ENOMEM;
                                goto out_err;
                        }
                        err = ceph_encode_locks_to_buffer(inode, flocks,
                                                          num_fcntl_locks,
                                                          num_flock_locks);
                        if (err) {
                                kfree(flocks);
                                flocks = NULL;
                                if (err == -ENOSPC)
                                        goto encode_again;
                                goto out_err;
                        }
                } else {
                        kfree(flocks);
                        flocks = NULL;
                }

                if (recon_state->msg_version >= 3) {
                        /* version, compat_version and struct_len */
                        total_len = 2 * sizeof(u8) + sizeof(u32);
                        struct_v = 2;
                }
                /*
                 * number of encoded locks is stable, so copy to pagelist
                 */
                struct_len = 2 * sizeof(u32) +
                             (num_fcntl_locks + num_flock_locks) *
                             sizeof(struct ceph_filelock);
                rec.v2.flock_len = cpu_to_le32(struct_len);

                struct_len += sizeof(rec.v2);
                struct_len += sizeof(u32) + pathlen;

                if (struct_v >= 2)
                        struct_len += sizeof(u64); /* snap_follows */

                total_len += struct_len;
                err = ceph_pagelist_reserve(pagelist, total_len);
                if (err) {
                        kfree(flocks);
                        goto out_err;
                }

                if (recon_state->msg_version >= 3) {
                        ceph_pagelist_encode_8(pagelist, struct_v);
                        ceph_pagelist_encode_8(pagelist, 1);
                        ceph_pagelist_encode_32(pagelist, struct_len);
                }
                ceph_pagelist_encode_string(pagelist, path, pathlen);
                ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
                ceph_locks_to_pagelist(flocks, pagelist,
                                       num_fcntl_locks, num_flock_locks);
                if (struct_v >= 2)
                        ceph_pagelist_encode_64(pagelist, snap_follows);

                kfree(flocks);
        } else {
                size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
                err = ceph_pagelist_reserve(pagelist, size);
                if (err)
                        goto out_err;

                ceph_pagelist_encode_string(pagelist, path, pathlen);
                ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
        }

        recon_state->nr_caps++;
out_err:
        kfree(path);
out_dput:
        dput(dentry);
        return err;
}
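
/*
 * Layout of one v3 cap reconnect record as emitted above (derived
 * directly from the encoding code, shown for reference):
 *
 *      u8  struct_v, u8 compat (=1), u32 struct_len
 *      string path (u32 len + bytes)
 *      struct ceph_mds_cap_reconnect (rec.v2)
 *      u32 num_fcntl_locks, u32 num_flock_locks, flock records
 *      u64 snap_follows            (struct_v >= 2 only)
 */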
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about; that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *session)
{
        struct ceph_msg *reply;
        struct rb_node *p;
        int mds = session->s_mds;
        int err = -ENOMEM;
        int s_nr_caps;
        struct ceph_pagelist *pagelist;
        struct ceph_reconnect_state recon_state;
        LIST_HEAD(dispose);

        pr_info("mds%d reconnect start\n", mds);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                goto fail_nopagelist;
        ceph_pagelist_init(pagelist);

        reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
        if (!reply)
                goto fail_nomsg;

        mutex_lock(&session->s_mutex);
        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
        session->s_seq = 0;

        dout("session %p state %s\n", session,
             ceph_session_state_name(session->s_state));

        spin_lock(&session->s_gen_ttl_lock);
        session->s_cap_gen++;
        spin_unlock(&session->s_gen_ttl_lock);

        spin_lock(&session->s_cap_lock);
        /* don't know if session is readonly */
        session->s_readonly = 0;
        /*
         * notify __ceph_remove_cap() that we are composing cap reconnect.
         * If a cap get released before being added to the cap reconnect,
         * __ceph_remove_cap() should skip queuing cap release.
         */
        session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
        detach_cap_releases(session, &dispose);
        spin_unlock(&session->s_cap_lock);
        dispose_cap_releases(mdsc, &dispose);

        /* trim unused caps to reduce MDS's cache rejoin time */
        if (mdsc->fsc->sb->s_root)
                shrink_dcache_parent(mdsc->fsc->sb->s_root);

        ceph_con_close(&session->s_con);
        ceph_con_open(&session->s_con,
                      CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

        /* replay unsafe requests */
        replay_unsafe_requests(mdsc, session);

        down_read(&mdsc->snap_rwsem);

        /* traverse this session's caps */
        s_nr_caps = session->s_nr_caps;
        err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
        if (err)
                goto fail;

        recon_state.nr_caps = 0;
        recon_state.pagelist = pagelist;
        if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
                recon_state.msg_version = 3;
        else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
                recon_state.msg_version = 2;
        else
                recon_state.msg_version = 1;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;

        spin_lock(&session->s_cap_lock);
        session->s_cap_reconnect = 0;
        spin_unlock(&session->s_cap_lock);

        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
         * it will tell us.
         */
        for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
                struct ceph_snap_realm *realm =
                        rb_entry(p, struct ceph_snap_realm, node);
                struct ceph_mds_snaprealm_reconnect sr_rec;

                dout(" adding snap realm %llx seq %lld parent %llx\n",
                     realm->ino, realm->seq, realm->parent_ino);
                sr_rec.ino = cpu_to_le64(realm->ino);
                sr_rec.seq = cpu_to_le64(realm->seq);
                sr_rec.parent = cpu_to_le64(realm->parent_ino);
                err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
                if (err)
                        goto fail;
        }

        reply->hdr.version = cpu_to_le16(recon_state.msg_version);

        /* raced with cap release? */
        if (s_nr_caps != recon_state.nr_caps) {
                struct page *page = list_first_entry(&pagelist->head,
                                                     struct page, lru);
                __le32 *addr = kmap_atomic(page);
                *addr = cpu_to_le32(recon_state.nr_caps);
                kunmap_atomic(addr);
        }

        reply->hdr.data_len = cpu_to_le32(pagelist->length);
        ceph_msg_data_add_pagelist(reply, pagelist);

        ceph_early_kick_flushing_caps(mdsc, session);

        ceph_con_send(&session->s_con, reply);

        mutex_unlock(&session->s_mutex);

        mutex_lock(&mdsc->mutex);
        __wake_requests(mdsc, &session->s_waiting);
        mutex_unlock(&mdsc->mutex);

        up_read(&mdsc->snap_rwsem);
        ceph_pagelist_release(pagelist);
        return;

fail:
        ceph_msg_put(reply);
        up_read(&mdsc->snap_rwsem);
        mutex_unlock(&session->s_mutex);
fail_nomsg:
        ceph_pagelist_release(pagelist);
fail_nopagelist:
        pr_err("error %d preparing reconnect for mds%d\n", err, mds);
        return;
}
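
/*
 * Reconnect message body, as assembled above (for reference): a u32
 * cap count (patched in place afterwards if caps were released while
 * iterating), one record per cap from encode_caps_cb(), then one
 * ceph_mds_snaprealm_reconnect record per known snap realm.
 */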
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
                          struct ceph_mdsmap *newmap,
                          struct ceph_mdsmap *oldmap)
{
        int i;
        int oldstate, newstate;
        struct ceph_mds_session *s;

        dout("check_new_map new %u old %u\n",
             newmap->m_epoch, oldmap->m_epoch);

        for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
                if (!mdsc->sessions[i])
                        continue;
                s = mdsc->sessions[i];
                oldstate = ceph_mdsmap_get_state(oldmap, i);
                newstate = ceph_mdsmap_get_state(newmap, i);

                dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
                     i, ceph_mds_state_name(oldstate),
                     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
                     ceph_mds_state_name(newstate),
                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
                     ceph_session_state_name(s->s_state));

                if (i >= newmap->m_num_mds ||
                    memcmp(ceph_mdsmap_get_addr(oldmap, i),
                           ceph_mdsmap_get_addr(newmap, i),
                           sizeof(struct ceph_entity_addr))) {
                        if (s->s_state == CEPH_MDS_SESSION_OPENING) {
                                /* the session never opened, just close it
                                 * out now */
                                get_session(s);
                                __unregister_session(mdsc, s);
                                __wake_requests(mdsc, &s->s_waiting);
                                ceph_put_mds_session(s);
                        } else if (i >= newmap->m_num_mds) {
                                /* force close session for stopped mds */
                                get_session(s);
                                __unregister_session(mdsc, s);
                                __wake_requests(mdsc, &s->s_waiting);
                                kick_requests(mdsc, i);
                                mutex_unlock(&mdsc->mutex);

                                mutex_lock(&s->s_mutex);
                                cleanup_session_requests(mdsc, s);
                                remove_session_caps(s);
                                mutex_unlock(&s->s_mutex);

                                ceph_put_mds_session(s);

                                mutex_lock(&mdsc->mutex);
                        } else {
                                /* just close it */
                                mutex_unlock(&mdsc->mutex);
                                mutex_lock(&s->s_mutex);
                                mutex_lock(&mdsc->mutex);
                                ceph_con_close(&s->s_con);
                                mutex_unlock(&s->s_mutex);
                                s->s_state = CEPH_MDS_SESSION_RESTARTING;
                        }
                } else if (oldstate == newstate) {
                        continue;  /* nothing new with this mds */
                }

                /*
                 * send reconnect?
                 */
                if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
                    newstate >= CEPH_MDS_STATE_RECONNECT) {
                        mutex_unlock(&mdsc->mutex);
                        send_mds_reconnect(mdsc, s);
                        mutex_lock(&mdsc->mutex);
                }

                /*
                 * kick request on any mds that has gone active.
                 */
                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
                    newstate >= CEPH_MDS_STATE_ACTIVE) {
                        if (oldstate != CEPH_MDS_STATE_CREATING &&
                            oldstate != CEPH_MDS_STATE_STARTING)
                                pr_info("mds%d recovery completed\n", s->s_mds);
                        kick_requests(mdsc, i);
                        ceph_kick_flushing_caps(mdsc, s);
                        wake_up_session_caps(s, 1);
                }
        }

        for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
                s = mdsc->sessions[i];
                if (!s)
                        continue;
                if (!ceph_mdsmap_is_laggy(newmap, i))
                        continue;
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG ||
                    s->s_state == CEPH_MDS_SESSION_CLOSING) {
                        dout(" connecting to export targets of laggy mds%d\n",
                             i);
                        __open_export_target_sessions(mdsc, s);
                }
        }
}
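
/*
 * Note (a summary of the logic above): a session left in RESTARTING
 * by an address change is only reconnected once the new map shows
 * that rank at state >= RECONNECT, i.e. the MDS is actually replaying
 * and ready to accept client reconnects.
 */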
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        ceph_put_mds_session(di->lease_session);
        di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session,
                         struct ceph_msg *msg)
{
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct dentry *parent, *dentry;
        struct ceph_dentry_info *di;
        int mds = session->s_mds;
        struct ceph_mds_lease *h = msg->front.iov_base;
        u32 seq;
        struct ceph_vino vino;
        struct qstr dname;
        int release = 0;

        dout("handle_lease from mds%d\n", mds);

        /* decode */
        if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
                goto bad;
        vino.ino = le64_to_cpu(h->ino);
        vino.snap = CEPH_NOSNAP;
        seq = le32_to_cpu(h->seq);
        dname.len = get_unaligned_le32(h + 1);
        if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
                goto bad;
        dname.name = (void *)(h + 1) + sizeof(u32);

        /* lookup inode */
        inode = ceph_find_inode(sb, vino);
        dout("handle_lease %s, ino %llx %p %.*s\n",
             ceph_lease_op_name(h->action), vino.ino, inode,
             dname.len, dname.name);

        mutex_lock(&session->s_mutex);
        session->s_seq++;

        if (!inode) {
                dout("handle_lease no inode %llx\n", vino.ino);
                goto release;
        }

        /* dentry */
        parent = d_find_alias(inode);
        if (!parent) {
                dout("no parent dentry on inode %p\n", inode);
                WARN_ON(1);
                goto release;  /* hrm... */
        }
        dname.hash = full_name_hash(parent, dname.name, dname.len);
        dentry = d_lookup(parent, &dname);
        dput(parent);
        if (!dentry)
                goto release;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        switch (h->action) {
        case CEPH_MDS_LEASE_REVOKE:
                if (di->lease_session == session) {
                        if (ceph_seq_cmp(di->lease_seq, seq) > 0)
                                h->seq = cpu_to_le32(di->lease_seq);
                        __ceph_mdsc_drop_dentry_lease(dentry);
                }
                release = 1;
                break;

        case CEPH_MDS_LEASE_RENEW:
                if (di->lease_session == session &&
                    di->lease_gen == session->s_cap_gen &&
                    di->lease_renew_from &&
                    di->lease_renew_after == 0) {
                        unsigned long duration =
                                msecs_to_jiffies(le32_to_cpu(h->duration_ms));

                        di->lease_seq = seq;
                        di->time = di->lease_renew_from + duration;
                        di->lease_renew_after = di->lease_renew_from +
                                (duration >> 1);
                        di->lease_renew_from = 0;
                }
                break;
        }
        spin_unlock(&dentry->d_lock);
        dput(dentry);

        if (!release)
                goto out;

release:
        /* let's just reuse the same message */
        h->action = CEPH_MDS_LEASE_REVOKE_ACK;
        ceph_msg_get(msg);
        ceph_con_send(&session->s_con, msg);

out:
        iput(inode);
        mutex_unlock(&session->s_mutex);
        return;

bad:
        pr_err("corrupt lease message\n");
        ceph_msg_dump(msg);
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                              struct inode *inode,
                              struct dentry *dentry, char action,
                              u32 seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_lease *lease;
        int len = sizeof(*lease) + sizeof(u32);
        int dnamelen = 0;

        dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
             inode, dentry, ceph_lease_op_name(action), session->s_mds);
        dnamelen = dentry->d_name.len;
        len += dnamelen;

        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
        if (!msg)
                return;
        lease = msg->front.iov_base;
        lease->action = action;
        lease->ino = cpu_to_le64(ceph_vino(inode).ino);
        lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
        lease->seq = cpu_to_le32(seq);
        put_unaligned_le32(dnamelen, lease + 1);
        memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

        /*
         * if this is a preemptive lease RELEASE, no need to
         * flush request stream, since the actual request will
         * soon follow.
         */
        msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

        ceph_con_send(&session->s_con, msg);
}
/*
 * lock/unlock each session, to wait for ongoing session activity
 */
static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
{
        int i;

        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&s->s_mutex);
                mutex_unlock(&s->s_mutex);
                ceph_put_mds_session(s);
                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);
}
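
/*
 * The lock/unlock pair above is a barrier idiom: taking and then
 * immediately releasing each s_mutex guarantees that any message
 * handler that held the mutex when umount began has finished before
 * we proceed to tear state down.
 */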
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
        int delay = 5;
        unsigned hz = round_jiffies_relative(HZ * delay);
        schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
        int i;
        struct ceph_mds_client *mdsc =
                container_of(work, struct ceph_mds_client, delayed_work.work);
        int renew_interval;
        int renew_caps;

        dout("mdsc delayed_work\n");
        ceph_check_delayed_caps(mdsc);

        mutex_lock(&mdsc->mutex);
        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
        renew_caps = time_after_eq(jiffies, HZ*renew_interval +
                                   mdsc->last_renew_caps);
        if (renew_caps)
                mdsc->last_renew_caps = jiffies;

        for (i = 0; i < mdsc->max_sessions; i++) {
                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;
                if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
                        dout("resending session close request for mds%d\n",
                             s->s_mds);
                        request_close_session(mdsc, s);
                        ceph_put_mds_session(s);
                        continue;
                }
                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
                        if (s->s_state == CEPH_MDS_SESSION_OPEN) {
                                s->s_state = CEPH_MDS_SESSION_HUNG;
                                pr_info("mds%d hung\n", s->s_mds);
                        }
                }
                if (s->s_state == CEPH_MDS_SESSION_NEW ||
                    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
                    s->s_state == CEPH_MDS_SESSION_REJECTED) {
                        /* this mds is failed or recovering, just wait */
                        ceph_put_mds_session(s);
                        continue;
                }
                mutex_unlock(&mdsc->mutex);

                mutex_lock(&s->s_mutex);
                if (renew_caps)
                        send_renew_caps(mdsc, s);
                else
                        ceph_con_keepalive(&s->s_con);
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG)
                        ceph_send_cap_releases(mdsc, s);
                mutex_unlock(&s->s_mutex);
                ceph_put_mds_session(s);

                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);

        schedule_delayed(mdsc);
}
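
/*
 * Timing example for the checks above (illustrative arithmetic; the
 * 60s figure is the usual mds_session_timeout, an assumption rather
 * than something defined here): the work fires roughly every 5s, and
 * renew_interval is 60 >> 2 = 15s, so cap renewal messages go out
 * about every 15 seconds while the session stays healthy.
 */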
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
        struct ceph_mds_client *mdsc;

        mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
        if (!mdsc)
                return -ENOMEM;
        mdsc->fsc = fsc;
        mutex_init(&mdsc->mutex);
        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
        if (!mdsc->mdsmap) {
                kfree(mdsc);
                return -ENOMEM;
        }

        fsc->mdsc = mdsc;
        init_completion(&mdsc->safe_umount_waiters);
        init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
        atomic_set(&mdsc->num_sessions, 0);
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        atomic64_set(&mdsc->quotarealms_count, 0);
        mdsc->last_snap_seq = 0;
        init_rwsem(&mdsc->snap_rwsem);
        mdsc->snap_realms = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->snap_empty);
        spin_lock_init(&mdsc->snap_empty_lock);
        mdsc->last_tid = 0;
        mdsc->oldest_tid = 0;
        mdsc->request_tree = RB_ROOT;
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
        spin_lock_init(&mdsc->cap_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
        mdsc->last_cap_flush_tid = 1;
        INIT_LIST_HEAD(&mdsc->cap_flush_list);
        INIT_LIST_HEAD(&mdsc->cap_dirty);
        INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
        mdsc->num_cap_flushing = 0;
        spin_lock_init(&mdsc->cap_dirty_lock);
        init_waitqueue_head(&mdsc->cap_flushing_wq);
        spin_lock_init(&mdsc->dentry_lru_lock);
        INIT_LIST_HEAD(&mdsc->dentry_lru);

        ceph_caps_init(mdsc);
        ceph_adjust_min_caps(mdsc, fsc->min_caps);

        init_rwsem(&mdsc->pool_perm_rwsem);
        mdsc->pool_perm_tree = RB_ROOT;

        strscpy(mdsc->nodename, utsname()->nodename,
                sizeof(mdsc->nodename));
        return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
        struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_request *req;

        mutex_lock(&mdsc->mutex);
        if (__get_oldest_req(mdsc)) {
                mutex_unlock(&mdsc->mutex);

                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
                                ceph_timeout_jiffies(opts->mount_timeout));

                /* tear down remaining requests */
                mutex_lock(&mdsc->mutex);
                while ((req = __get_oldest_req(mdsc))) {
                        dout("wait_requests timed out on tid %llu\n",
                             req->r_tid);
                        __unregister_request(mdsc, req);
                }
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
        dout("pre_umount\n");
        mdsc->stopping = 1;

        lock_unlock_sessions(mdsc);
        ceph_flush_dirty_caps(mdsc);
        wait_requests(mdsc);

        /*
         * wait for reply handlers to drop their request refs and
         * their inode/dcache refs
         */
        ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
        struct ceph_mds_request *req = NULL, *nextreq;
        struct rb_node *n;

        mutex_lock(&mdsc->mutex);
        dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
        req = __get_oldest_req(mdsc);
        while (req && req->r_tid <= want_tid) {
                /* find next request */
                n = rb_next(&req->r_node);
                if (n)
                        nextreq = rb_entry(n, struct ceph_mds_request, r_node);
                else
                        nextreq = NULL;
                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
                    (req->r_op & CEPH_MDS_OP_WRITE)) {
                        /* write op */
                        ceph_mdsc_get_request(req);
                        if (nextreq)
                                ceph_mdsc_get_request(nextreq);
                        mutex_unlock(&mdsc->mutex);
                        dout("wait_unsafe_requests wait on %llu (want %llu)\n",
                             req->r_tid, want_tid);
                        wait_for_completion(&req->r_safe_completion);
                        mutex_lock(&mdsc->mutex);
                        ceph_mdsc_put_request(req);
                        if (!nextreq)
                                break;  /* next dne before, so we're done! */
                        if (RB_EMPTY_NODE(&nextreq->r_node)) {
                                /* next request was removed from tree */
                                ceph_mdsc_put_request(nextreq);
                                goto restart;
                        }
                        ceph_mdsc_put_request(nextreq);  /* won't go away */
                }
                req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_unsafe_requests done\n");
}
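
/*
 * Note on the restart above: the request tree can change while we
 * sleep in wait_for_completion(), so nextreq is pinned before the
 * mutex is dropped; if it was nevertheless removed from the tree
 * (RB_EMPTY_NODE), the scan restarts from the oldest request rather
 * than walking a stale pointer.
 */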
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
        u64 want_tid, want_flush;

        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
                return;

        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
        mutex_unlock(&mdsc->mutex);

        ceph_flush_dirty_caps(mdsc);
        spin_lock(&mdsc->cap_dirty_lock);
        want_flush = mdsc->last_cap_flush_tid;
        if (!list_empty(&mdsc->cap_flush_list)) {
                struct ceph_cap_flush *cf =
                        list_last_entry(&mdsc->cap_flush_list,
                                        struct ceph_cap_flush, g_list);
                cf->wake = true;
        }
        spin_unlock(&mdsc->cap_dirty_lock);

        dout("sync want tid %lld flush_seq %lld\n",
             want_tid, want_flush);

        wait_unsafe_requests(mdsc, want_tid);
        wait_caps_flush(mdsc, want_flush);
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
                return true;
        return atomic_read(&mdsc->num_sessions) <= skipped;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
        struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_session *session;
        int i;
        int skipped = 0;

        dout("close_sessions\n");

        /* close sessions */
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                session = __ceph_lookup_mds_session(mdsc, i);
                if (!session)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&session->s_mutex);
                if (__close_session(mdsc, session) <= 0)
                        skipped++;
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);

        dout("waiting for sessions to close\n");
        wait_event_timeout(mdsc->session_close_wq,
                           done_closing_sessions(mdsc, skipped),
                           ceph_timeout_jiffies(opts->mount_timeout));

        /* tear down remaining sessions */
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
                        __unregister_session(mdsc, session);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&session->s_mutex);
                        remove_session_caps(session);
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        mutex_lock(&mdsc->mutex);
                }
        }
        WARN_ON(!list_empty(&mdsc->cap_delay_list));
        mutex_unlock(&mdsc->mutex);

        ceph_cleanup_empty_realms(mdsc);

        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

        dout("stopped\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
        struct ceph_mds_session *session;
        int mds;

        dout("force umount\n");

        mutex_lock(&mdsc->mutex);
        for (mds = 0; mds < mdsc->max_sessions; mds++) {
                session = __ceph_lookup_mds_session(mdsc, mds);
                if (!session)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&session->s_mutex);
                __close_session(mdsc, session);
                if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
                        cleanup_session_requests(mdsc, session);
                        remove_session_caps(session);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
                kick_requests(mdsc, mds);
        }
        __wake_requests(mdsc, &mdsc->waiting_for_map);
        mutex_unlock(&mdsc->mutex);
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
        dout("stop\n");
        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
        if (mdsc->mdsmap)
                ceph_mdsmap_destroy(mdsc->mdsmap);
        kfree(mdsc->sessions);
        ceph_caps_finalize(mdsc);
        ceph_pool_perm_destroy(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;
        dout("mdsc_destroy %p\n", mdsc);

        /* flush out any connection work with references to us */
        ceph_msgr_flush();

        ceph_mdsc_stop(mdsc);

        fsc->mdsc = NULL;
        kfree(mdsc);
        dout("mdsc_destroy %p done\n", mdsc);
}
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
        struct ceph_fs_client *fsc = mdsc->fsc;
        const char *mds_namespace = fsc->mount_options->mds_namespace;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
        u32 epoch;
        u32 map_len;
        u32 num_fs;
        u32 mount_fscid = (u32)-1;
        u8 struct_v, struct_cv;
        int err = -EINVAL;

        ceph_decode_need(&p, end, sizeof(u32), bad);
        epoch = ceph_decode_32(&p);

        dout("handle_fsmap epoch %u\n", epoch);

        ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
        struct_v = ceph_decode_8(&p);
        struct_cv = ceph_decode_8(&p);
        map_len = ceph_decode_32(&p);

        ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
        p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

        num_fs = ceph_decode_32(&p);
        while (num_fs-- > 0) {
                void *info_p, *info_end;
                u32 info_len;
                u8 info_v, info_cv;
                u32 fscid, namelen;

                ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
                info_v = ceph_decode_8(&p);
                info_cv = ceph_decode_8(&p);
                info_len = ceph_decode_32(&p);
                ceph_decode_need(&p, end, info_len, bad);
                info_p = p;
                info_end = p + info_len;
                p = info_end;

                ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
                fscid = ceph_decode_32(&info_p);
                namelen = ceph_decode_32(&info_p);
                ceph_decode_need(&info_p, info_end, namelen, bad);

                if (mds_namespace &&
                    strlen(mds_namespace) == namelen &&
                    !strncmp(mds_namespace, (char *)info_p, namelen)) {
                        mount_fscid = fscid;
                        break;
                }
        }

        ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
        if (mount_fscid != (u32)-1) {
                fsc->client->monc.fs_cluster_id = mount_fscid;
                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
                                   0, true);
                ceph_monc_renew_subs(&fsc->client->monc);
        } else {
                err = -ENOENT;
                goto err_out;
        }
        return;

bad:
        pr_err("error decoding fsmap\n");
err_out:
        mutex_lock(&mdsc->mutex);
        mdsc->mdsmap_err = err;
        __wake_requests(mdsc, &mdsc->waiting_for_map);
        mutex_unlock(&mdsc->mutex);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
        u32 epoch;
        u32 maplen;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
        struct ceph_mdsmap *newmap, *oldmap;
        struct ceph_fsid fsid;
        int err = -EINVAL;

        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
                return;
        epoch = ceph_decode_32(&p);
        maplen = ceph_decode_32(&p);
        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

        /* do we need it? */
        mutex_lock(&mdsc->mutex);
        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
                dout("handle_map epoch %u <= our %u\n",
                     epoch, mdsc->mdsmap->m_epoch);
                mutex_unlock(&mdsc->mutex);
                return;
        }

        newmap = ceph_mdsmap_decode(&p, end);
        if (IS_ERR(newmap)) {
                err = PTR_ERR(newmap);
                goto bad_unlock;
        }

        /* swap into place */
        if (mdsc->mdsmap) {
                oldmap = mdsc->mdsmap;
                mdsc->mdsmap = newmap;
                check_new_map(mdsc, newmap, oldmap);
                ceph_mdsmap_destroy(oldmap);
        } else {
                mdsc->mdsmap = newmap;  /* first mds map */
        }
        mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
                                        MAX_LFS_FILESIZE);

        __wake_requests(mdsc, &mdsc->waiting_for_map);
        ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
                          mdsc->mdsmap->m_epoch);

        mutex_unlock(&mdsc->mutex);
        schedule_delayed(mdsc);
        return;

bad_unlock:
        mutex_unlock(&mdsc->mutex);
bad:
        pr_err("error decoding mdsmap %d\n", err);
        return;
}
static struct ceph_connection *con_get(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;

        if (get_session(s)) {
                dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
                return con;
        }
        dout("mdsc con_get %p FAIL\n", s);
        return NULL;
}

static void con_put(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;

        dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;

        pr_warn("mds%d closed our session\n", s->s_mds);
        send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        int type = le16_to_cpu(msg->hdr.type);

        mutex_lock(&mdsc->mutex);
        if (__verify_registered_session(mdsc, s) < 0) {
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
        mutex_unlock(&mdsc->mutex);

        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_mdsmap(mdsc, msg);
                break;
        case CEPH_MSG_FS_MAP_USER:
                ceph_mdsc_handle_fsmap(mdsc, msg);
                break;
        case CEPH_MSG_CLIENT_SESSION:
                handle_session(s, msg);
                break;
        case CEPH_MSG_CLIENT_REPLY:
                handle_reply(s, msg);
                break;
        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
                handle_forward(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_CAPS:
                ceph_handle_caps(s, msg);
                break;
        case CEPH_MSG_CLIENT_SNAP:
                ceph_handle_snap(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_LEASE:
                handle_lease(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_QUOTA:
                ceph_handle_quota(mdsc, s, msg);
                break;
        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
out:
        ceph_msg_put(msg);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
                                                  int *proto, int force_new)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
        struct ceph_auth_handshake *auth = &s->s_auth;

        if (force_new && auth->authorizer) {
                ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        } else {
                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        }
        *proto = ac->protocol;

        return auth;
}
static int add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

        return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
                                            challenge_buf, challenge_buf_len);
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

        return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

        return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
                                      struct ceph_msg_header *hdr, int *skip)
{
        struct ceph_msg *msg;
        int type = (int) le16_to_cpu(hdr->type);
        int front_len = (int) le32_to_cpu(hdr->front_len);

        if (con->in_msg)
                return con->in_msg;

        *skip = 0;
        msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
        if (!msg) {
                pr_err("unable to allocate msg type %d len %d\n",
                       type, front_len);
                return NULL;
        }

        return msg;
}
static int mds_sign_message(struct ceph_msg *msg)
{
        struct ceph_mds_session *s = msg->con->private;
        struct ceph_auth_handshake *auth = &s->s_auth;

        return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
        struct ceph_mds_session *s = msg->con->private;
        struct ceph_auth_handshake *auth = &s->s_auth;

        return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations mds_con_ops = {
        .get = con_get,
        .put = con_put,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .add_authorizer_challenge = add_authorizer_challenge,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .peer_reset = peer_reset,
        .alloc_msg = mds_alloc_msg,
        .sign_message = mds_sign_message,
        .check_message_signature = mds_check_message_signature,
};

/* eof */