#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	bool flock;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	info->pool_ns_len = 0;
	info->pool_ns_data = NULL;
	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}
	}

	return 0;
bad:
	return err;
}
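
/*
 * A reader's sketch of the decode helpers used above (they live in
 * <linux/ceph/decode.h>, not here): ceph_decode_32_safe(p, end, v, bad)
 * is roughly
 *
 *	if (!ceph_has_room(p, end, sizeof(u32)))
 *		goto bad;
 *	v = ceph_decode_32(p);
 *
 * i.e. every read is bounds-checked against the end pointer first, so
 * a truncated or corrupt reply jumps to the "bad" label instead of
 * reading past the end of the message buffer.
 */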
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		rde->name_len = ceph_decode_32(p);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
		rde->lease = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
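
/*
 * Rough wire layout of the reply body decoded above: a u32-length
 * "trace" section (dentry/inode metadata), a u32-length "extra"
 * section (readdir, filelock or create results, depending on the op),
 * and a u32-length snap blob.  Each section is parsed against its own
 * end pointer (p + len), so a malformed section cannot run into its
 * neighbors.
 */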
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}
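
/*
 * These names are purely diagnostic (dout/pr_* messages and debugfs
 * output); the usual lifetime of a healthy session is roughly
 * new -> opening -> open, with closing, reconnecting and restarting
 * covering the shutdown and MDS-failover paths.
 */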
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
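
/*
 * DEFINE_RB_FUNCS (from <linux/ceph/libceph.h>) expands to the static
 * helpers insert_request(), erase_request() and lookup_request() used
 * below: an rbtree of ceph_mds_request keyed by r_tid and linked
 * through r_node.
 */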
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}
/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = d_inode(parent);

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = d_inode(req->r_dentry);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = d_inode(dn);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			}
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
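
/*
 * The resulting metadata payload is the standard Ceph encoding of a
 * map<string, string>: a u32 entry count, then for each entry a
 * u32-length-prefixed key followed by a u32-length-prefixed value.
 * Illustrative example (hypothetical values): {"hostname" -> "node1"}
 * encodes as
 *
 *	01 00 00 00 | 08 00 00 00 "hostname" | 05 00 00 00 "node1"
 *
 * which matches the 4 + (8 + keylen + vallen) per-entry accounting in
 * the length calculation above.
 */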
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}
struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}
void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/*
 * session caps
 */

/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps  finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool drop = false;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;

		if (ci->i_wrbuffer_ref > 0 &&
		    ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			invalidate = true;

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (drop)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
		session->s_trim_caps--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				session->s_trim_caps--;
			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
			     inode, cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}
static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}
/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
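
/*
 * Releases are batched: each CEPH_MSG_CLIENT_CAPRELEASE message is
 * flushed once it holds CEPH_CAPS_PER_RELEASE items, and the "again"
 * loop re-splices anything queued while s_cap_lock was dropped, so a
 * single call may emit several messages.
 */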
/*
 * requests
 */

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
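
/*
 * Worked example with illustrative (not measured) numbers: on 4 KiB
 * pages, a directory with ~200 entries and a ~100-byte
 * ceph_mds_reply_dir_entry needs ~20 KB, so get_order() picks order 3
 * (32 KiB).  If that allocation fails we retry at progressively
 * smaller orders, and whatever order succeeds caps how many entries
 * we ask the MDS to return per reply.
 */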
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = current_fs_time(mdsc->fsc->sb);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
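
/*
 * Example of the encoding described above (hypothetical names): for a
 * dentry at foo/.snap/bar, the backward walk emits "bar", a bare '/'
 * for the .snap dir, then "foo", producing "foo//bar" with *base set
 * to the ino of the first non-snapped ancestor.  If a concurrent
 * rename invalidates the rename_lock seqcount, the whole path is
 * simply rebuilt from scratch.
 */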
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;
	struct inode *dir;

	rcu_read_lock();
	dir = d_inode_rcu(dentry->d_parent);
	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
		*pino = ceph_ino(dir);
		rcu_read_unlock();
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	rcu_read_unlock();
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
1893 static struct ceph_msg
*create_request_message(struct ceph_mds_client
*mdsc
,
1894 struct ceph_mds_request
*req
,
1895 int mds
, bool drop_cap_releases
)
1897 struct ceph_msg
*msg
;
1898 struct ceph_mds_request_head
*head
;
1899 const char *path1
= NULL
;
1900 const char *path2
= NULL
;
1901 u64 ino1
= 0, ino2
= 0;
1902 int pathlen1
= 0, pathlen2
= 0;
1903 int freepath1
= 0, freepath2
= 0;
1909 ret
= set_request_path_attr(req
->r_inode
, req
->r_dentry
,
1910 req
->r_path1
, req
->r_ino1
.ino
,
1911 &path1
, &pathlen1
, &ino1
, &freepath1
);
1917 ret
= set_request_path_attr(NULL
, req
->r_old_dentry
,
1918 req
->r_path2
, req
->r_ino2
.ino
,
1919 &path2
, &pathlen2
, &ino2
, &freepath2
);
1925 len
= sizeof(*head
) +
1926 pathlen1
+ pathlen2
+ 2*(1 + sizeof(u32
) + sizeof(u64
)) +
1927 sizeof(struct ceph_timespec
);
1929 /* calculate (max) length for cap releases */
1930 len
+= sizeof(struct ceph_mds_request_release
) *
1931 (!!req
->r_inode_drop
+ !!req
->r_dentry_drop
+
1932 !!req
->r_old_inode_drop
+ !!req
->r_old_dentry_drop
);
1933 if (req
->r_dentry_drop
)
1934 len
+= req
->r_dentry
->d_name
.len
;
1935 if (req
->r_old_dentry_drop
)
1936 len
+= req
->r_old_dentry
->d_name
.len
;
1938 msg
= ceph_msg_new(CEPH_MSG_CLIENT_REQUEST
, len
, GFP_NOFS
, false);
1940 msg
= ERR_PTR(-ENOMEM
);
1944 msg
->hdr
.version
= cpu_to_le16(2);
1945 msg
->hdr
.tid
= cpu_to_le64(req
->r_tid
);
1947 head
= msg
->front
.iov_base
;
1948 p
= msg
->front
.iov_base
+ sizeof(*head
);
1949 end
= msg
->front
.iov_base
+ msg
->front
.iov_len
;
1951 head
->mdsmap_epoch
= cpu_to_le32(mdsc
->mdsmap
->m_epoch
);
1952 head
->op
= cpu_to_le32(req
->r_op
);
1953 head
->caller_uid
= cpu_to_le32(from_kuid(&init_user_ns
, req
->r_uid
));
1954 head
->caller_gid
= cpu_to_le32(from_kgid(&init_user_ns
, req
->r_gid
));
1955 head
->args
= req
->r_args
;
1957 ceph_encode_filepath(&p
, end
, ino1
, path1
);
1958 ceph_encode_filepath(&p
, end
, ino2
, path2
);
1960 /* make note of release offset, in case we need to replay */
1961 req
->r_request_release_offset
= p
- msg
->front
.iov_base
;
1965 if (req
->r_inode_drop
)
1966 releases
+= ceph_encode_inode_release(&p
,
1967 req
->r_inode
? req
->r_inode
: d_inode(req
->r_dentry
),
1968 mds
, req
->r_inode_drop
, req
->r_inode_unless
, 0);
1969 if (req
->r_dentry_drop
)
1970 releases
+= ceph_encode_dentry_release(&p
, req
->r_dentry
,
1971 mds
, req
->r_dentry_drop
, req
->r_dentry_unless
);
1972 if (req
->r_old_dentry_drop
)
1973 releases
+= ceph_encode_dentry_release(&p
, req
->r_old_dentry
,
1974 mds
, req
->r_old_dentry_drop
, req
->r_old_dentry_unless
);
1975 if (req
->r_old_inode_drop
)
1976 releases
+= ceph_encode_inode_release(&p
,
1977 d_inode(req
->r_old_dentry
),
1978 mds
, req
->r_old_inode_drop
, req
->r_old_inode_unless
, 0);
1980 if (drop_cap_releases
) {
1982 p
= msg
->front
.iov_base
+ req
->r_request_release_offset
;
1985 head
->num_releases
= cpu_to_le16(releases
);
1989 struct ceph_timespec ts
;
1990 ceph_encode_timespec(&ts
, &req
->r_stamp
);
1991 ceph_encode_copy(&p
, &ts
, sizeof(ts
));
1995 msg
->front
.iov_len
= p
- msg
->front
.iov_base
;
1996 msg
->hdr
.front_len
= cpu_to_le32(msg
->front
.iov_len
);
1998 if (req
->r_pagelist
) {
1999 struct ceph_pagelist
*pagelist
= req
->r_pagelist
;
2000 atomic_inc(&pagelist
->refcnt
);
2001 ceph_msg_data_add_pagelist(msg
, pagelist
);
2002 msg
->hdr
.data_len
= cpu_to_le32(pagelist
->length
);
2004 msg
->hdr
.data_len
= 0;
2007 msg
->hdr
.data_off
= cpu_to_le16(0);
2011 kfree((char *)path2
);
2014 kfree((char *)path1
);
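
/*
 * Front-section layout of the request built above, in order: a
 * ceph_mds_request_head, two encoded filepaths (ino + string each),
 * the cap release records (whose offset is remembered in
 * r_request_release_offset so a replay can truncate them), and a
 * ceph_timespec stamp.  Any pagelist payload (e.g. setxattr data)
 * rides in the message's data section instead of the front.
 */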
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
			err = -EACCES;
			goto out_session;
		}
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return err;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE:
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
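
/*
 * Illustrative timeline (a sketch, not from the original source) of the
 * unsafe/safe reply pairing handled above for one write-class request:
 *
 *	client                          mds
 *	  request  ------------------>
 *	           <------------------  unsafe reply: r_got_unsafe set,
 *	                                request linked on s_unsafe,
 *	                                caller unblocks
 *	           <------------------  safe reply: r_got_safe set,
 *	                                __unregister_request() runs and
 *	                                r_safe_completion is completed
 *
 * Waiters that need durability (see wait_unsafe_requests() later in this
 * file) block on r_safe_completion until the safe reply arrives.
 */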

/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_got_result);
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
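
/*
 * Worked example (illustrative): a request first sent to mds1 is forwarded
 * to mds3 with fwd_seq == 1, so we record r_num_fwd = 1 and resend to mds3.
 * If a duplicate notification with fwd_seq == 1 arrives later, the
 * "old seq" branch above ignores it rather than resending again.
 */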

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	int wake = 0;
	int op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	case CEPH_SESSION_REJECT:
		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
		pr_info("mds%d rejected session\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_REJECTED;
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}

/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * also re-send old requests when MDS enters reconnect stage, so that
	 * the MDS can process completed requests in clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}
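
/*
 * Example (illustrative): after an MDS restart, a rename whose unsafe
 * reply was already delivered sits on s_unsafe and is re-sent by the list
 * walk above so the recovering MDS can replay it; a request that was sent
 * but never answered (r_attempts > 0, not yet unsafe) is picked up by the
 * request_tree walk in the second half of the function instead.
 */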

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	u64 snap_follows = 0;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->msg_version >= 2) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
	}

	if (list_empty(&ci->i_cap_snaps)) {
		snap_follows = 0;
	} else {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		snap_follows = capsnap->follows;
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->msg_version >= 2) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;
		size_t struct_len, total_len = 0;
		u8 struct_v = 0;

encode_again:
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}

		if (recon_state->msg_version >= 3) {
			/* version, compat_version and struct_len */
			total_len = 2 * sizeof(u8) + sizeof(u32);
			struct_v = 2;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		struct_len = 2 * sizeof(u32) +
			     (num_fcntl_locks + num_flock_locks) *
			     sizeof(struct ceph_filelock);
		rec.v2.flock_len = cpu_to_le32(struct_len);

		struct_len += sizeof(rec.v2);
		struct_len += sizeof(u32) + pathlen;

		if (struct_v >= 2)
			struct_len += sizeof(u64); /* snap_follows */

		total_len += struct_len;
		err = ceph_pagelist_reserve(pagelist, total_len);

		if (!err) {
			if (recon_state->msg_version >= 3) {
				ceph_pagelist_encode_8(pagelist, struct_v);
				ceph_pagelist_encode_8(pagelist, 1);
				ceph_pagelist_encode_32(pagelist, struct_len);
			}
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
			ceph_locks_to_pagelist(flocks, pagelist,
					       num_fcntl_locks,
					       num_flock_locks);
			if (struct_v >= 2)
				ceph_pagelist_encode_64(pagelist, snap_follows);
		}
		kfree(flocks);
	} else {
		size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
		err = ceph_pagelist_reserve(pagelist, size);
		if (!err) {
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
		}
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
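
/*
 * Sketch of the per-cap record this callback appends for
 * msg_version >= 3, derived from the encoding calls above (field order
 * only; an aid for reading the code, not a wire-format spec):
 *
 *	u64  ino                             (encoded first, above)
 *	u8   struct_v, u8 compat, u32 struct_len
 *	u32  pathlen, char path[pathlen]
 *	struct ceph_mds_cap_reconnect rec.v2 (flock_len = lock blob size)
 *	u32  num_fcntl_locks, u32 num_flock_locks,
 *	     followed by the ceph_filelock entries
 *	u64  snap_follows
 */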

/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
		recon_state.msg_version = 3;
	else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
		recon_state.msg_version = 2;
	else
		recon_state.msg_version = 1;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	reply->hdr.version = cpu_to_le16(recon_state.msg_version);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
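
/*
 * Resulting CEPH_MSG_CLIENT_RECONNECT payload, as framed above (sketch):
 *
 *	u32 nr_caps          -- patched in place via kmap_atomic if caps
 *	                        were released while iterating
 *	per-cap records      -- one per encode_caps_cb() invocation
 *	snaprealm records    -- one ceph_mds_snaprealm_reconnect per realm
 */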

/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
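
/*
 * Worked example (illustrative): when mds0 fails over, the client might
 * see map epochs taking mds0 through active -> replay -> reconnect ->
 * rejoin -> active. The address change closes the old connection and
 * marks the session RESTARTING; the reconnect epoch triggers
 * send_mds_reconnect(); and the transition to active kicks requests and
 * flushing caps so outstanding work is resubmitted.
 */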

/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}

static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(parent, dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			di->time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}

/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
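
/*
 * Example of the renewal arithmetic above: with an mdsmap session timeout
 * of 60s (the usual ceph default; illustrative only), renew_interval is
 * 60 >> 2 = 15s, so caps are renewed roughly four times per timeout
 * window, leaving headroom before the MDS would mark the session stale.
 */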

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		fsc->mdsc = NULL;
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	INIT_LIST_HEAD(&mdsc->cap_flush_list);
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}

/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}

void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_last_entry(&mdsc->cap_flush_list,
					struct ceph_cap_flush, g_list);
		cf->wake = true;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	dout("sync want tid %lld flush_seq %lld\n",
	     want_tid, want_flush);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush);
}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) <= skipped;
}

/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;
	int skipped = 0;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		if (__close_session(mdsc, session) <= 0)
			skipped++;
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq,
			   done_closing_sessions(mdsc, skipped),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}

void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}

void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = mdsc->fsc;
	const char *mds_namespace = fsc->mount_options->mds_namespace;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	u32 epoch;
	u32 map_len;
	u32 num_fs;
	u32 mount_fscid = (u32)-1;
	u8 struct_v, struct_cv;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(u32), bad);
	epoch = ceph_decode_32(&p);

	dout("handle_fsmap epoch %u\n", epoch);

	ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
	struct_v = ceph_decode_8(&p);
	struct_cv = ceph_decode_8(&p);
	map_len = ceph_decode_32(&p);

	ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
	p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

	num_fs = ceph_decode_32(&p);
	while (num_fs-- > 0) {
		void *info_p, *info_end;
		u32 info_len;
		u8 info_v, info_cv;
		u32 fscid, namelen;

		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
		info_v = ceph_decode_8(&p);
		info_cv = ceph_decode_8(&p);
		info_len = ceph_decode_32(&p);
		ceph_decode_need(&p, end, info_len, bad);
		info_p = p;
		info_end = p + info_len;
		p = info_end;

		ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
		fscid = ceph_decode_32(&info_p);
		namelen = ceph_decode_32(&info_p);
		ceph_decode_need(&info_p, info_end, namelen, bad);

		if (mds_namespace &&
		    strlen(mds_namespace) == namelen &&
		    !strncmp(mds_namespace, (char *)info_p, namelen)) {
			mount_fscid = fscid;
			break;
		}
	}

	ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
	if (mount_fscid != (u32)-1) {
		fsc->client->monc.fs_cluster_id = mount_fscid;
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
		ceph_monc_renew_subs(&fsc->client->monc);
	} else {
		err = -ENOENT;
		goto err_out;
	}
	return;
bad:
	pr_err("error decoding fsmap\n");
err_out:
	mutex_lock(&mdsc->mutex);
	mdsc->mdsmap_err = -ENOENT;
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
	return;
}

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}

static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}

static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(mdsc, msg);
		break;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}

static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};