#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	bool flock;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	info->pool_ns_len = 0;
	info->pool_ns_data = NULL;
	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}
	}

	return 0;
bad:
	return err;
}
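/*
 * Note on the decode helpers used above: the ceph_decode_*_safe()
 * macros bounds-check against 'end' and jump to the 'bad' label on
 * truncated input, so a short or corrupt reply never reads past the
 * message buffer.
 */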
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		rde->name_len = ceph_decode_32(p);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
		rde->lease = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;
bad:
	return -EIO;
}
/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_READDIR ||
		 info->head->op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;
	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
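/*
 * Reply framing recap: the front buffer carries a fixed reply head
 * followed by three length-prefixed blobs (trace, extra, snap blob),
 * and each blob is parsed against its own sub-range (p..p+len) so a
 * bad length in one section cannot walk into the next.
 */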
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
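/*
 * Sizing note: get_count_order(mds+1) rounds the required slot count
 * up to a power of two, so sessions[] grows geometrically and is
 * copied at most O(log(max mds)) times over the life of the client.
 */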
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);
	return req;
}
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = d_inode(parent);

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = d_inode(req->r_dentry);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = d_inode(dn);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			}
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
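/*
 * Selection order above: explicit resend hint first, then the dir
 * fragment's replica/auth hints, then whichever MDS holds a cap on
 * the inode (preferring the auth cap), and only then a random
 * active MDS.
 */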
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
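/*
 * Wire format note: each map entry costs 8 bytes of length prefixes
 * (one u32 per string) plus the raw string bytes, which is exactly
 * what the metadata_bytes accounting above reserves.
 */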
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		list_del_init(&req->r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
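/*
 * Race-handling recap: the current cap is pinned as s_cap_iterator
 * while s_cap_lock is dropped for the callback; a racing
 * __ceph_remove_cap() is expected to only clear cap->ci in that
 * window, and the list removal is finished here once the iterator
 * has moved past the cap.
 */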
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	int drop = 0;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;

		if (ci->i_wrbuffer_ref > 0 &&
		    ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			invalidate = true;

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_del(&cf->i_list);
			list_add(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	while (drop--)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
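/*
 * Passing reconnect != 0 resets each inode's wanted/requested max
 * size before waking waiters, which forces writers to renegotiate
 * max_size with the (possibly recovered) MDS.
 */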
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
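/*
 * Session life cycle recap: NEW/OPENING -> OPEN (possibly HUNG) ->
 * CLOSING; the >= CLOSING check above makes __close_session()
 * idempotent, so repeated close attempts send one request.
 */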
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}
static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
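/*
 * Releases are batched: a message is flushed as soon as it holds
 * CEPH_CAPS_PER_RELEASE items, and the 'again' loop re-checks the
 * session list because new releases may be queued while s_cap_lock
 * is dropped for sending.
 */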
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
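/*
 * Sizing strategy: start from the directory's current entry count
 * (clamped to max_readdir), fall back to smaller allocation orders
 * under memory pressure, then recompute the entry count advertised
 * to the MDS from whatever buffer was actually obtained.
 */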
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = current_fs_time(mdsc->fsc->sb);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
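/*
 * The path is assembled right-to-left while rename_lock is sampled
 * as a seqlock; if a concurrent rename changes the ancestry mid-walk,
 * read_seqretry() detects it and the whole walk is retried.
 */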
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
		*pino = ceph_ino(d_inode(dentry->d_parent));
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
			      req->r_path1, req->r_ino1.ino,
			      &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
			      req->r_path2, req->r_ino2.ino,
			      &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		atomic_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}
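/*
 * r_request_release_offset is recorded so that a replayed request
 * can truncate the already-encoded cap/dentry releases out of the
 * original message (see __prepare_send_request() below) without
 * rebuilding the paths.
 */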
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return err;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
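/*
 * ceph_mdsc_submit_request() is the asynchronous half of
 * ceph_mdsc_do_request() below: it registers and fires the request
 * but leaves waiting (and abort handling) to the caller.
 */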
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}


	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
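/*
 * The cluster may hand one of our requests to a different mds.  The
 * forward notification carries the new mds rank and a forward sequence
 * number, letting us ignore stale forwards and resend to the right
 * place.
 */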
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(req->r_got_result);
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
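/*
 * The session ops handled below cover the session lifecycle: OPEN,
 * RENEWCAPS acks, CLOSE, STALE (bump the cap generation and renew),
 * RECALL_STATE (trim caps to an mds-requested limit), FLUSHMSG (ack),
 * and FORCE_RO (mark the session read-only).
 */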
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * Also re-send old requests when the MDS enters the reconnect
	 * stage, so that the MDS can process completed requests during
	 * its clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}
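/*
 * The per-cap reconnect record has three encodings, chosen in
 * send_mds_reconnect() from the peer's feature bits: v1 (legacy), v2
 * (adds file lock state), and v3 (wraps v2 in a versioned envelope and
 * appends snap_follows).
 */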
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	u64 snap_follows = 0;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->msg_version >= 2) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
	}

	if (list_empty(&ci->i_cap_snaps)) {
		snap_follows = 0;
	} else {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		snap_follows = capsnap->follows;
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->msg_version >= 2) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;
		size_t struct_len, total_len = 0;
		u8 struct_v = 0;

encode_again:
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}

		if (recon_state->msg_version >= 3) {
			/* version, compat_version and struct_len */
			total_len = 2 * sizeof(u8) + sizeof(u32);
			struct_v = 2;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		struct_len = 2 * sizeof(u32) +
			    (num_fcntl_locks + num_flock_locks) *
			    sizeof(struct ceph_filelock);
		rec.v2.flock_len = cpu_to_le32(struct_len);

		struct_len += sizeof(rec.v2);
		struct_len += sizeof(u32) + pathlen;

		if (struct_v >= 2)
			struct_len += sizeof(u64); /* snap_follows */

		total_len += struct_len;
		err = ceph_pagelist_reserve(pagelist, total_len);

		if (!err) {
			if (recon_state->msg_version >= 3) {
				ceph_pagelist_encode_8(pagelist, struct_v);
				ceph_pagelist_encode_8(pagelist, 1);
				ceph_pagelist_encode_32(pagelist, struct_len);
			}
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
			ceph_locks_to_pagelist(flocks, pagelist,
					       num_fcntl_locks,
					       num_flock_locks);
			if (struct_v >= 2)
				ceph_pagelist_encode_64(pagelist, snap_follows);
		}
		kfree(flocks);
	} else {
		size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
		err = ceph_pagelist_reserve(pagelist, size);
		if (!err) {
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
		}
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about; that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap gets released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing the cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
		recon_state.msg_version = 3;
	else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
		recon_state.msg_version = 2;
	else
		recon_state.msg_version = 1;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	reply->hdr.version = cpu_to_le16(recon_state.msg_version);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick requests on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
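/*
 * Dentry leases let the client trust a cached dentry until the lease
 * expires or the MDS revokes it.  handle_lease() below processes
 * REVOKE and RENEW messages; revocations are acked by reusing the
 * incoming message with the action rewritten.
 */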
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(parent, dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			di->time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
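/*
 * Cadence note for the delayed work above: cap renewal fires every
 * quarter of the mdsmap's session timeout (e.g. every 15s for a 60s
 * timeout, the common default), so sessions are renewed well before
 * they can go stale.
 */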
int ceph_mdsc_init(struct ceph_fs_client *fsc)

{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	INIT_LIST_HEAD(&mdsc->cap_flush_list);
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
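/*
 * ceph_mdsc_sync() below provides fsync-like semantics for the client
 * as a whole: snapshot the last issued request tid and the last cap
 * flush tid, then wait for all write requests up to that tid and for
 * the corresponding cap flushes to be acked.
 */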
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_last_entry(&mdsc->cap_flush_list,
					struct ceph_cap_flush, g_list);
		cf->wake = true;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	dout("sync want tid %lld flush_seq %lld\n",
	     want_tid, want_flush);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush);
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = mdsc->fsc;
	const char *mds_namespace = fsc->mount_options->mds_namespace;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	u32 epoch;
	u32 map_len;
	u32 num_fs;
	u32 mount_fscid = (u32)-1;
	u8 struct_v, struct_cv;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(u32), bad);
	epoch = ceph_decode_32(&p);

	dout("handle_fsmap epoch %u\n", epoch);

	ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
	struct_v = ceph_decode_8(&p);
	struct_cv = ceph_decode_8(&p);
	map_len = ceph_decode_32(&p);

	ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
	p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

	num_fs = ceph_decode_32(&p);
	while (num_fs-- > 0) {
		void *info_p, *info_end;
		u32 info_len;
		u8 info_v, info_cv;
		u32 fscid, namelen;

		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
		info_v = ceph_decode_8(&p);
		info_cv = ceph_decode_8(&p);
		info_len = ceph_decode_32(&p);
		ceph_decode_need(&p, end, info_len, bad);
		info_p = p;
		info_end = p + info_len;
		p = info_end;

		ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
		fscid = ceph_decode_32(&info_p);
		namelen = ceph_decode_32(&info_p);
		ceph_decode_need(&info_p, info_end, namelen, bad);

		if (mds_namespace &&
		    strlen(mds_namespace) == namelen &&
		    !strncmp(mds_namespace, (char *)info_p, namelen)) {
			mount_fscid = fscid;
			break;
		}
	}

	ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
	if (mount_fscid != (u32)-1) {
		fsc->client->monc.fs_cluster_id = mount_fscid;
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
		ceph_monc_renew_subs(&fsc->client->monc);
	} else {
		err = -ENOENT;
		goto err_out;
	}
	return;
bad:
	pr_err("error decoding fsmap\n");
err_out:
	mutex_lock(&mdsc->mutex);
	mdsc->mdsmap_err = -ENOENT;
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
	return;
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
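/*
 * The remainder of this file wires the mds client into the ceph
 * messenger: session reference counting for connections, message
 * dispatch, authorizer management, and message signing.
 */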
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(mdsc, msg);
		break;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};