#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
        struct ceph_pagelist *pagelist;
        bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
                               struct ceph_mds_reply_info_in *info,
                               u64 features)
{
        int err = -EIO;

        info->in = *p;
        *p += sizeof(struct ceph_mds_reply_inode) +
                sizeof(*info->in->fragtree.splits) *
                le32_to_cpu(info->in->fragtree.nsplits);

        ceph_decode_32_safe(p, end, info->symlink_len, bad);
        ceph_decode_need(p, end, info->symlink_len, bad);
        info->symlink = *p;
        *p += info->symlink_len;

        if (features & CEPH_FEATURE_DIRLAYOUTHASH)
                ceph_decode_copy_safe(p, end, &info->dir_layout,
                                      sizeof(info->dir_layout), bad);
        else
                memset(&info->dir_layout, 0, sizeof(info->dir_layout));

        ceph_decode_32_safe(p, end, info->xattr_len, bad);
        ceph_decode_need(p, end, info->xattr_len, bad);
        info->xattr_data = *p;
        *p += info->xattr_len;
        return 0;
bad:
        return err;
}
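
/*
 * Note on parse_reply_info_in(): the ceph_decode_*_safe() helpers from
 * <linux/ceph/decode.h> bounds-check the remaining buffer and jump to
 * the "bad" label on a short message, so each variable-length field
 * (symlink, xattr blob) is validated before the cursor advances past it.
 */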

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        int err;

        if (info->head->is_dentry) {
                err = parse_reply_info_in(p, end, &info->diri, features);
                if (err < 0)
                        goto out_bad;

                if (unlikely(*p + sizeof(*info->dirfrag) > end))
                        goto bad;
                info->dirfrag = *p;
                *p += sizeof(*info->dirfrag) +
                        sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
                if (unlikely(*p > end))
                        goto bad;

                ceph_decode_32_safe(p, end, info->dname_len, bad);
                ceph_decode_need(p, end, info->dname_len, bad);
                info->dname = *p;
                *p += info->dname_len;
                info->dlease = *p;
                *p += sizeof(*info->dlease);
        }

        if (info->head->is_target) {
                err = parse_reply_info_in(p, end, &info->targeti, features);
                if (err < 0)
                        goto out_bad;
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing mds trace %d\n", err);
        return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
                                struct ceph_mds_reply_info_parsed *info,
                                u64 features)
{
        u32 num, i = 0;
        int err;

        info->dir_dir = *p;
        if (*p + sizeof(*info->dir_dir) > end)
                goto bad;
        *p += sizeof(*info->dir_dir) +
                sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
        if (*p > end)
                goto bad;

        ceph_decode_need(p, end, sizeof(num) + 2, bad);
        num = ceph_decode_32(p);
        info->dir_end = ceph_decode_8(p);
        info->dir_complete = ceph_decode_8(p);
        if (num == 0)
                goto done;

        /* alloc large array */
        info->dir_nr = num;
        info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
                               sizeof(*info->dir_dname) +
                               sizeof(*info->dir_dname_len) +
                               sizeof(*info->dir_dlease),
                               GFP_NOFS);
        if (info->dir_in == NULL) {
                err = -ENOMEM;
                goto out_bad;
        }
        info->dir_dname = (void *)(info->dir_in + num);
        info->dir_dname_len = (void *)(info->dir_dname + num);
        info->dir_dlease = (void *)(info->dir_dname_len + num);

        while (num) {
                /* dentry */
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                info->dir_dname_len[i] = ceph_decode_32(p);
                ceph_decode_need(p, end, info->dir_dname_len[i], bad);
                info->dir_dname[i] = *p;
                *p += info->dir_dname_len[i];
                dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
                     info->dir_dname[i]);
                info->dir_dlease[i] = *p;
                *p += sizeof(struct ceph_mds_reply_lease);

                /* inode */
                err = parse_reply_info_in(p, end, &info->dir_in[i], features);
                if (err < 0)
                        goto out_bad;
                i++;
                num--;
        }

done:
        if (*p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing dir contents %d\n", err);
        return err;
}
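
/*
 * Note: parse_reply_info_dir() makes one allocation and carves it into
 * four parallel arrays (dir_in, dir_dname, dir_dname_len, dir_dlease),
 * one slot per readdir entry, so destroy_reply_info() can release
 * everything with a single kfree() of dir_in.
 */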

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
                                     struct ceph_mds_reply_info_parsed *info,
                                     u64 features)
{
        if (*p + sizeof(*info->filelock_reply) > end)
                goto bad;

        info->filelock_reply = *p;
        *p += sizeof(*info->filelock_reply);

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
                                   struct ceph_mds_reply_info_parsed *info,
                                   u64 features)
{
        if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
                if (*p == end) {
                        info->has_create_ino = false;
                } else {
                        info->has_create_ino = true;
                        info->ino = ceph_decode_64(p);
                }
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
        else if (info->head->op == CEPH_MDS_OP_READDIR ||
                 info->head->op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
        else if (info->head->op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
                            struct ceph_mds_reply_info_parsed *info,
                            u64 features)
{
        void *p, *end;
        u32 len;
        int err;

        info->head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
        end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

        /* trace */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_trace(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* extra */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_extra(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* snap blob */
        ceph_decode_32_safe(&p, end, len, bad);
        info->snapblob_len = len;
        info->snapblob = p;
        p += len;

        if (p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("mds parse_reply err %d\n", err);
        return err;
}
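
/*
 * Reply front layout consumed above, following ceph_mds_reply_head:
 * three length-prefixed sections (a u32 length, then that many bytes) --
 * the trace, the op-specific extra data, and the snap blob.  The first
 * two are parsed in place; the snap blob is only noted here and decoded
 * later, in handle_reply(), while holding snap_rwsem.
 */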

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
        kfree(info->dir_in);
}

static const char *session_state_name(int s)
{
        switch (s) {
        case CEPH_MDS_SESSION_NEW: return "new";
        case CEPH_MDS_SESSION_OPENING: return "opening";
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
        case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        default: return "???";
        }
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
        if (atomic_inc_not_zero(&s->s_ref)) {
                dout("mdsc get_session %p %d -> %d\n", s,
                     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
                return s;
        } else {
                dout("mdsc get_session %p 0 -- FAIL", s);
                return NULL;
        }
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
        dout("mdsc put_session %p %d -> %d\n", s,
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
                if (s->s_auth.authorizer)
                        ceph_auth_destroy_authorizer(
                                s->s_mdsc->fsc->client->monc.auth,
                                s->s_auth.authorizer);
                kfree(s);
        }
}
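
/*
 * Session reference counting: register_session() leaves one reference
 * with mdsc->sessions[] and one with the caller; lookups take their own
 * via get_session().  The session (and its authorizer) is freed only
 * when the final reference is dropped above.
 */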

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
                                                   int mds)
{
        struct ceph_mds_session *session;

        if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
                return NULL;
        session = mdsc->sessions[mds];
        dout("lookup_mds_session %p %d\n", session,
             atomic_read(&session->s_ref));
        get_session(session);
        return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
        if (mds >= mdsc->max_sessions)
                return false;
        return mdsc->sessions[mds];
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
                                       struct ceph_mds_session *s)
{
        if (s->s_mds >= mdsc->max_sessions ||
            mdsc->sessions[s->s_mds] != s)
                return -ENOENT;
        return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                                                 int mds)
{
        struct ceph_mds_session *s;

        if (mds >= mdsc->mdsmap->m_max_mds)
                return ERR_PTR(-EINVAL);

        s = kzalloc(sizeof(*s), GFP_NOFS);
        if (!s)
                return ERR_PTR(-ENOMEM);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
        s->s_ttl = 0;
        s->s_seq = 0;
        mutex_init(&s->s_mutex);

        ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

        spin_lock_init(&s->s_gen_ttl_lock);
        s->s_cap_gen = 0;
        s->s_cap_ttl = jiffies - 1;

        spin_lock_init(&s->s_cap_lock);
        s->s_renew_requested = 0;
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
        s->s_nr_caps = 0;
        s->s_trim_caps = 0;
        atomic_set(&s->s_ref, 1);
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
        INIT_LIST_HEAD(&s->s_cap_flushing);
        INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

        dout("register_session mds%d\n", mds);
        if (mds >= mdsc->max_sessions) {
                int newmax = 1 << get_count_order(mds+1);
                struct ceph_mds_session **sa;

                dout("register_session realloc to %d\n", newmax);
                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
                if (sa == NULL)
                        goto fail_realloc;
                if (mdsc->sessions) {
                        memcpy(sa, mdsc->sessions,
                               mdsc->max_sessions * sizeof(void *));
                        kfree(mdsc->sessions);
                }
                mdsc->sessions = sa;
                mdsc->max_sessions = newmax;
        }
        mdsc->sessions[mds] = s;
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

        return s;

fail_realloc:
        kfree(s);
        return ERR_PTR(-ENOMEM);
}
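
/*
 * The sessions array grows in powers of two via get_count_order(): e.g.
 * registering mds5 against a smaller array reallocates it to
 * 1 << get_count_order(6) == 8 entries, so repeated registrations cost
 * only O(log n) reallocations.
 */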

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *s)
{
        dout("__unregister_session mds%d %p\n", s->s_mds, s);
        BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
        if (req->r_session) {
                ceph_put_mds_session(req->r_session);
                req->r_session = NULL;
        }
}

void ceph_mdsc_release_request(struct kref *kref)
{
        struct ceph_mds_request *req = container_of(kref,
                                                    struct ceph_mds_request,
                                                    r_kref);
        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply) {
                ceph_msg_put(req->r_reply);
                destroy_reply_info(&req->r_reply_info);
        }
        if (req->r_inode) {
                ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
                iput(req->r_inode);
        }
        if (req->r_locked_dir)
                ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
        if (req->r_target_inode)
                iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
        if (req->r_old_dentry) {
                /*
                 * track (and drop pins for) r_old_dentry_dir
                 * separately, since r_old_dentry's d_parent may have
                 * changed between the dir mutex being dropped and
                 * this request being freed.
                 */
                ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
                dput(req->r_old_dentry);
                iput(req->r_old_dentry_dir);
        }
        kfree(req->r_path1);
        kfree(req->r_path2);
        put_request_session(req);
        ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
        kfree(req);
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
                                                 u64 tid)
{
        struct ceph_mds_request *req;
        struct rb_node *n = mdsc->request_tree.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_mds_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else {
                        ceph_mdsc_get_request(req);
                        return req;
                }
        }
        return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
                             struct ceph_mds_request *new)
{
        struct rb_node **p = &mdsc->request_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_mds_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_mds_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &mdsc->request_tree);
}

/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
                               struct ceph_mds_request *req,
                               struct inode *dir)
{
        req->r_tid = ++mdsc->last_tid;
        if (req->r_num_caps)
                ceph_reserve_caps(mdsc, &req->r_caps_reservation,
                                  req->r_num_caps);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
        __insert_request(mdsc, req);

        req->r_uid = current_fsuid();
        req->r_gid = current_fsgid();

        if (dir) {
                struct ceph_inode_info *ci = ceph_inode(dir);

                ihold(dir);
                spin_lock(&ci->i_unsafe_lock);
                req->r_unsafe_dir = dir;
                list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
                spin_unlock(&ci->i_unsafe_lock);
        }
}

static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &mdsc->request_tree);
        RB_CLEAR_NODE(&req->r_node);

        if (req->r_unsafe_dir) {
                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_dir_item);
                spin_unlock(&ci->i_unsafe_lock);

                iput(req->r_unsafe_dir);
                req->r_unsafe_dir = NULL;
        }

        complete_all(&req->r_safe_completion);

        ceph_mdsc_put_request(req);
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
        /*
         * we don't need to worry about protecting the d_parent access
         * here because we never rename inside the snapped namespace
         * except to resplice to another snapdir, and either the old or new
         * result is a valid result.
         */
        while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
                dentry = dentry->d_parent;
        return dentry;
}

static int __choose_mds(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        int mode = req->r_direct_mode;
        int mds = -1;
        u32 hash = req->r_direct_hash;
        bool is_hash = req->r_direct_is_hash;

        /*
         * is there a specific mds we should try?  ignore hint if we have
         * no session and the mds is not up (active or recovering).
         */
        if (req->r_resend_mds >= 0 &&
            (__have_session(mdsc, req->r_resend_mds) ||
             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
                dout("choose_mds using resend_mds mds%d\n",
                     req->r_resend_mds);
                return req->r_resend_mds;
        }

        if (mode == USE_RANDOM_MDS)
                goto random;

        inode = NULL;
        if (req->r_inode) {
                inode = req->r_inode;
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent = req->r_dentry->d_parent;
                struct inode *dir = parent->d_inode;

                if (dir->i_sb != mdsc->fsc->sb) {
                        /* not this fs! */
                        inode = req->r_dentry->d_inode;
                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        struct dentry *dn = get_nonsnap_parent(parent);
                        inode = dn->d_inode;
                        dout("__choose_mds using nonsnap parent %p\n", inode);
                } else if (req->r_dentry->d_inode) {
                        /* dentry target */
                        inode = req->r_dentry->d_inode;
                } else {
                        /* dir + name */
                        inode = dir;
                        hash = ceph_dentry_hash(dir, req->r_dentry);
                        is_hash = true;
                }
        }

        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
             (int)hash, mode);
        if (!inode)
                goto random;
        ci = ceph_inode(inode);

        if (is_hash && S_ISDIR(inode->i_mode)) {
                struct ceph_inode_frag frag;
                int found;

                ceph_choose_frag(ci, hash, &frag, &found);
                if (found) {
                        if (mode == USE_ANY_MDS && frag.ndist > 0) {
                                u8 r;

                                /* choose a random replica */
                                get_random_bytes(&r, 1);
                                r %= frag.ndist;
                                mds = frag.dist[r];
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (%d/%d)\n",
                                     inode, ceph_vinop(inode),
                                     frag.frag, mds,
                                     (int)r, frag.ndist);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        return mds;
                        }

                        /* since this file/dir wasn't known to be
                         * replicated, then we want to look for the
                         * authoritative mds. */
                        mode = USE_AUTH_MDS;
                        if (frag.mds >= 0) {
                                /* choose auth mds */
                                mds = frag.mds;
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (auth)\n",
                                     inode, ceph_vinop(inode), frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        return mds;
                        }
                }
        }

        spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
                spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
        spin_unlock(&ci->i_ceph_lock);
        return mds;

random:
        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
        dout("choose_mds chose random mds%d\n", mds);
        return mds;
}
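
/*
 * To summarize the selection order in __choose_mds(): an explicit
 * resend/forward hint wins; then, for hashed dentries in a replicated
 * dir fragment, a random replica (USE_ANY_MDS) or the fragment's auth
 * mds; then the mds of whichever cap the inode holds (preferring the
 * auth cap); and finally a random mds from the map.
 */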

static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_session_head *h;

        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
                           false);
        if (!msg) {
                pr_err("create_session_msg ENOMEM creating msg\n");
                return NULL;
        }
        h = msg->front.iov_base;
        h->op = cpu_to_le32(op);
        h->seq = cpu_to_le64(seq);
        return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
                          struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        int mstate;
        int mds = session->s_mds;

        /* wait for mds to go active? */
        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
        dout("open_session to mds%d (%s)\n", mds,
             ceph_mds_state_name(mstate));
        session->s_state = CEPH_MDS_SESSION_OPENING;
        session->s_renew_requested = jiffies;

        /* send connect message */
        msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
                                          struct ceph_mds_session *session)
{
        struct ceph_mds_info *mi;
        struct ceph_mds_session *ts;
        int i, mds = session->s_mds;
        int target;

        if (mds >= mdsc->mdsmap->m_max_mds)
                return;
        mi = &mdsc->mdsmap->m_info[mds];
        dout("open_export_target_sessions for mds%d (%d targets)\n",
             session->s_mds, mi->num_export_targets);

        for (i = 0; i < mi->num_export_targets; i++) {
                target = mi->export_targets[i];
                ts = __ceph_lookup_mds_session(mdsc, target);
                if (!ts) {
                        ts = register_session(mdsc, target);
                        if (IS_ERR(ts))
                                return;
                }
                if (session->s_state == CEPH_MDS_SESSION_NEW ||
                    session->s_state == CEPH_MDS_SESSION_CLOSING)
                        __open_session(mdsc, session);
                else
                        dout(" mds%d target mds%d %p is %s\n", session->s_mds,
                             i, ts, session_state_name(ts->s_state));
                ceph_put_mds_session(ts);
        }
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
                                           struct ceph_mds_session *session)
{
        mutex_lock(&mdsc->mutex);
        __open_export_target_sessions(mdsc, session);
        mutex_unlock(&mdsc->mutex);
}

/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        spin_lock(&session->s_cap_lock);
        while (!list_empty(&session->s_cap_releases)) {
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
        }
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
        }
        spin_unlock(&session->s_cap_lock);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
                                int (*cb)(struct inode *, struct ceph_cap *,
                                          void *), void *arg)
{
        struct list_head *p;
        struct ceph_cap *cap;
        struct inode *inode, *last_inode = NULL;
        struct ceph_cap *old_cap = NULL;
        int ret;

        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
        spin_lock(&session->s_cap_lock);
        p = session->s_caps.next;
        while (p != &session->s_caps) {
                cap = list_entry(p, struct ceph_cap, session_caps);
                inode = igrab(&cap->ci->vfs_inode);
                if (!inode) {
                        p = p->next;
                        continue;
                }
                session->s_cap_iterator = cap;
                spin_unlock(&session->s_cap_lock);

                if (last_inode) {
                        iput(last_inode);
                        last_inode = NULL;
                }
                if (old_cap) {
                        ceph_put_cap(session->s_mdsc, old_cap);
                        old_cap = NULL;
                }

                ret = cb(inode, cap, arg);
                last_inode = inode;

                spin_lock(&session->s_cap_lock);
                p = p->next;
                if (cap->ci == NULL) {
                        dout("iterate_session_caps finishing cap %p removal\n",
                             cap);
                        BUG_ON(cap->session != session);
                        list_del_init(&cap->session_caps);
                        session->s_nr_caps--;
                        cap->session = NULL;
                        old_cap = cap;  /* put_cap it w/o locks held */
                }
                if (ret < 0)
                        goto out;
        }
        ret = 0;
out:
        session->s_cap_iterator = NULL;
        spin_unlock(&session->s_cap_lock);

        if (last_inode)
                iput(last_inode);
        if (old_cap)
                ceph_put_cap(session->s_mdsc, old_cap);

        return ret;
}
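
/*
 * iterate_session_caps() defers iput() and ceph_put_cap() until after
 * s_cap_lock is dropped, since both can block.  s_cap_iterator marks the
 * cap currently in use so that a racing __ceph_remove_cap() leaves it on
 * the list (with cap->ci cleared) for the loop above to finish removing.
 */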

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                                  void *arg)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = 0;

        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
        spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;

                spin_lock(&mdsc->cap_dirty_lock);
                if (!list_empty(&ci->i_dirty_item)) {
                        pr_info(" dropping dirty %s state for %p %lld\n",
                                ceph_cap_string(ci->i_dirty_caps),
                                inode, ceph_ino(inode));
                        ci->i_dirty_caps = 0;
                        list_del_init(&ci->i_dirty_item);
                        drop = 1;
                }
                if (!list_empty(&ci->i_flushing_item)) {
                        pr_info(" dropping dirty+flushing %s state for %p %lld\n",
                                ceph_cap_string(ci->i_flushing_caps),
                                inode, ceph_ino(inode));
                        ci->i_flushing_caps = 0;
                        list_del_init(&ci->i_flushing_item);
                        mdsc->num_cap_flushing--;
                        drop = 1;
                }
                if (drop && ci->i_wrbuffer_ref) {
                        pr_info(" dropping dirty data for %p %lld\n",
                                inode, ceph_ino(inode));
                        ci->i_wrbuffer_ref = 0;
                        ci->i_wrbuffer_ref_head = 0;
                        drop++;
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
}
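
/*
 * Each reference dropped above (dirty state, dirty pages) corresponds to
 * an inode reference the cap state was implicitly holding; they are
 * released with iput() only after the spinlocks are dropped.
 */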

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
        dout("remove_session_caps on %p\n", session);
        iterate_session_caps(session, remove_session_caps_cb, NULL);

        spin_lock(&session->s_cap_lock);
        if (session->s_nr_caps > 0) {
                struct super_block *sb = session->s_mdsc->fsc->sb;
                struct inode *inode;
                struct ceph_cap *cap, *prev = NULL;
                struct ceph_vino vino;
                /*
                 * iterate_session_caps() skips inodes that are being
                 * deleted, we need to wait until deletions are complete.
                 * __wait_on_freeing_inode() is designed for the job,
                 * but it is not exported, so use lookup inode function
                 * to access it.
                 */
                while (!list_empty(&session->s_caps)) {
                        cap = list_entry(session->s_caps.next,
                                         struct ceph_cap, session_caps);
                        if (cap == prev)
                                break;
                        prev = cap;
                        vino = cap->ci->i_vino;
                        spin_unlock(&session->s_cap_lock);

                        inode = ceph_find_inode(sb, vino);
                        iput(inode);

                        spin_lock(&session->s_cap_lock);
                }
        }
        spin_unlock(&session->s_cap_lock);

        BUG_ON(session->s_nr_caps > 0);
        BUG_ON(!list_empty(&session->s_cap_flushing));
        cleanup_cap_releases(session);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
                              void *arg)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        wake_up_all(&ci->i_cap_wq);
        if (arg) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
                spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
                                 int reconnect)
{
        dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
        iterate_session_caps(session, wake_up_session_cb,
                             (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        int state;

        if (time_after_eq(jiffies, session->s_cap_ttl) &&
            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
                pr_info("mds%d caps stale\n", session->s_mds);
        session->s_renew_requested = jiffies;

        /* do not try to renew caps until a recovering mds has reconnected
         * with its clients. */
        state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
        if (state < CEPH_MDS_STATE_RECONNECT) {
                dout("send_renew_caps ignoring mds%d (%s)\n",
                     session->s_mds, ceph_mds_state_name(state));
                return 0;
        }

        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
             ceph_mds_state_name(state));
        msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                 ++session->s_renew_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session, int is_renew)
{
        int was_stale;
        int wake = 0;

        spin_lock(&session->s_cap_lock);
        was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

        session->s_cap_ttl = session->s_renew_requested +
                mdsc->mdsmap->m_session_timeout*HZ;

        if (was_stale) {
                if (time_before(jiffies, session->s_cap_ttl)) {
                        pr_info("mds%d caps renewed\n", session->s_mds);
                        wake = 1;
                } else {
                        pr_info("mds%d caps still stale\n", session->s_mds);
                }
        }
        dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
             session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
             time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
        spin_unlock(&session->s_cap_lock);

        if (wake)
                wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        dout("request_close_session mds%d state %s seq %lld\n",
             session->s_mds, session_state_name(session->s_state),
             session->s_seq);
        msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session)
{
        if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
                return 0;
        session->s_state = CEPH_MDS_SESSION_CLOSING;
        return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
        struct ceph_mds_session *session = arg;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int used, oissued, mine;

        if (session->s_trim_caps <= 0)
                return -1;

        spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        oissued = __ceph_caps_issued_other(ci, cap);

        dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
             inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
             ceph_cap_string(used));
        if (ci->i_dirty_caps)
                goto out;   /* dirty caps */
        if ((used & ~oissued) & mine)
                goto out;   /* we need these caps */

        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
                __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
                                    cap->mseq, cap->issue_seq);
                __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
                return 0;
        }

out:
        spin_unlock(&ci->i_ceph_lock);
        return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
                     struct ceph_mds_session *session,
                     int max_caps)
{
        int trim_caps = session->s_nr_caps - max_caps;

        dout("trim_caps mds%d start: %d / %d, trim %d\n",
             session->s_mds, session->s_nr_caps, max_caps, trim_caps);
        if (trim_caps > 0) {
                session->s_trim_caps = trim_caps;
                iterate_session_caps(session, trim_caps_cb, session);
                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
                     session->s_mds, session->s_nr_caps, max_caps,
                     trim_caps - session->s_trim_caps);
                session->s_trim_caps = 0;
        }
        return 0;
}

/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
                          struct ceph_mds_session *session)
{
        struct ceph_msg *msg, *partial = NULL;
        struct ceph_mds_cap_release *head;
        int err = -ENOMEM;
        int extra = mdsc->fsc->mount_options->cap_release_safety;
        int num;

        dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
             extra);

        spin_lock(&session->s_cap_lock);

        if (!list_empty(&session->s_cap_releases)) {
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg,
                                       list_head);
                head = msg->front.iov_base;
                num = le32_to_cpu(head->num);
                if (num) {
                        dout(" partial %p with (%d/%d)\n", msg, num,
                             (int)CEPH_CAPS_PER_RELEASE);
                        extra += CEPH_CAPS_PER_RELEASE - num;
                        partial = msg;
                }
        }

        while (session->s_num_cap_releases < session->s_nr_caps + extra) {
                spin_unlock(&session->s_cap_lock);
                msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
                                   GFP_NOFS, false);
                if (!msg)
                        goto out_unlocked;
                dout("add_cap_releases %p msg %p now %d\n", session, msg,
                     (int)msg->front.iov_len);
                head = msg->front.iov_base;
                head->num = cpu_to_le32(0);
                msg->front.iov_len = sizeof(*head);
                spin_lock(&session->s_cap_lock);
                list_add(&msg->list_head, &session->s_cap_releases);
                session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
        }

        if (partial) {
                head = partial->front.iov_base;
                num = le32_to_cpu(head->num);
                dout(" queueing partial %p with %d/%d\n", partial, num,
                     (int)CEPH_CAPS_PER_RELEASE);
                list_move_tail(&partial->list_head,
                               &session->s_cap_releases_done);
                session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
        }
        err = 0;
        spin_unlock(&session->s_cap_lock);
out_unlocked:
        return err;
}
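
/*
 * Accounting sketch: each CAPRELEASE message holds CEPH_CAPS_PER_RELEASE
 * entries, and s_num_cap_releases counts free slots across the queue.
 * The loop above preallocates until every currently held cap (plus
 * cap_release_safety extra) has a slot; a partially filled message is
 * moved to s_cap_releases_done so it can be sent right away.
 */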

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
        int mds, ret = 1;

        dout("check_cap_flush want %lld\n", want_flush_seq);
        mutex_lock(&mdsc->mutex);
        for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
                struct ceph_mds_session *session = mdsc->sessions[mds];

                if (!session)
                        continue;
                get_session(session);
                mutex_unlock(&mdsc->mutex);

                mutex_lock(&session->s_mutex);
                if (!list_empty(&session->s_cap_flushing)) {
                        struct ceph_inode_info *ci =
                                list_entry(session->s_cap_flushing.next,
                                           struct ceph_inode_info,
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;

                        spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
                                     ci->i_cap_flush_seq, want_flush_seq,
                                     session->s_mds);
                                ret = 0;
                        }
                        spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);

                if (!ret)
                        return ret;
                mutex_lock(&mdsc->mutex);
        }

        mutex_unlock(&mdsc->mutex);
        dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
        return ret;
}

/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                            struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        dout("send_cap_releases mds%d\n", session->s_mds);
        spin_lock(&session->s_cap_lock);
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                spin_unlock(&session->s_cap_lock);
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
                dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
                ceph_con_send(&session->s_con, msg);
                spin_lock(&session->s_cap_lock);
        }
        spin_unlock(&session->s_cap_lock);
}

static void discard_cap_releases(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        struct ceph_mds_cap_release *head;
        unsigned num;

        dout("discard_cap_releases mds%d\n", session->s_mds);
        spin_lock(&session->s_cap_lock);

        /* zero out the in-progress message */
        msg = list_first_entry(&session->s_cap_releases,
                               struct ceph_msg, list_head);
        head = msg->front.iov_base;
        num = le32_to_cpu(head->num);
        dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
        head->num = cpu_to_le32(0);
        msg->front.iov_len = sizeof(*head);
        session->s_num_cap_releases += num;

        /* requeue completed messages */
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);

                head = msg->front.iov_base;
                num = le32_to_cpu(head->num);
                dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
                     num);
                session->s_num_cap_releases += num;
                head->num = cpu_to_le32(0);
                msg->front.iov_len = sizeof(*head);
                list_add(&msg->list_head, &session->s_cap_releases);
        }

        spin_unlock(&session->s_cap_lock);
}

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
        struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

        if (!req)
                return ERR_PTR(-ENOMEM);

        mutex_init(&req->r_fill_mutex);
        req->r_mdsc = mdsc;
        req->r_started = jiffies;
        req->r_resend_mds = -1;
        INIT_LIST_HEAD(&req->r_unsafe_dir_item);
        req->r_fmode = -1;
        kref_init(&req->r_kref);
        INIT_LIST_HEAD(&req->r_wait);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        req->r_op = op;
        req->r_direct_mode = mode;
        return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
        if (RB_EMPTY_ROOT(&mdsc->request_tree))
                return NULL;
        return rb_entry(rb_first(&mdsc->request_tree),
                        struct ceph_mds_request, r_node);
}

static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
        struct ceph_mds_request *req = __get_oldest_req(mdsc);

        if (req)
                return req->r_tid;
        return 0;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
                           int stop_on_nosnap)
{
        struct dentry *temp;
        char *path;
        int len, pos;
        unsigned seq;

        if (dentry == NULL)
                return ERR_PTR(-EINVAL);

retry:
        len = 0;
        seq = read_seqbegin(&rename_lock);
        rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp);) {
                struct inode *inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
                        len++;  /* slash only */
                else if (stop_on_nosnap && inode &&
                         ceph_snap(inode) == CEPH_NOSNAP)
                        break;
                else
                        len += 1 + temp->d_name.len;
                temp = temp->d_parent;
        }
        rcu_read_unlock();
        if (len)
                len--;  /* no leading '/' */

        path = kmalloc(len+1, GFP_NOFS);
        if (path == NULL)
                return ERR_PTR(-ENOMEM);
        pos = len;
        path[pos] = 0;  /* trailing null */
        rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
                struct inode *inode;

                spin_lock(&temp->d_lock);
                inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                        dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
                        spin_unlock(&temp->d_lock);
                        break;
                } else {
                        pos -= temp->d_name.len;
                        if (pos < 0) {
                                spin_unlock(&temp->d_lock);
                                break;
                        }
                        strncpy(path + pos, temp->d_name.name,
                                temp->d_name.len);
                }
                spin_unlock(&temp->d_lock);
                if (pos)
                        path[--pos] = '/';
                temp = temp->d_parent;
        }
        rcu_read_unlock();
        if (pos != 0 || read_seqretry(&rename_lock, seq)) {
                pr_err("build_path did not end path lookup where "
                       "expected, namelen is %d, pos is %d\n", len, pos);
                /* presumably this is only possible if racing with a
                   rename of one of the parent directories (we can not
                   lock the dentries above us to prevent this, but
                   retrying should be harmless) */
                kfree(path);
                goto retry;
        }

        *base = ceph_ino(temp->d_inode);
        *plen = len;
        dout("build_path on %p %d built %llx '%.*s'\n",
             dentry, d_count(dentry), *base, len, path);
        return path;
}
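
/*
 * The rename_lock seqlock read above guards both walks toward the root:
 * if a concurrent rename changed the ancestry mid-walk (pos != 0 at the
 * end, or a seqretry), the partially built path is discarded and the
 * whole computation restarts from scratch.
 */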

static int build_dentry_path(struct dentry *dentry,
                             const char **ppath, int *ppathlen, u64 *pino,
                             int *pfreepath)
{
        char *path;

        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
                *pino = ceph_ino(dentry->d_parent->d_inode);
                *ppath = dentry->d_name.name;
                *ppathlen = dentry->d_name.len;
                return 0;
        }
        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
        *pfreepath = 1;
        return 0;
}

static int build_inode_path(struct inode *inode,
                            const char **ppath, int *ppathlen, u64 *pino,
                            int *pfreepath)
{
        struct dentry *dentry;
        char *path;

        if (ceph_snap(inode) == CEPH_NOSNAP) {
                *pino = ceph_ino(inode);
                *ppathlen = 0;
                return 0;
        }
        dentry = d_find_alias(inode);
        path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
        dput(dentry);
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
        *pfreepath = 1;
        return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                                 const char *rpath, u64 rino,
                                 const char **ppath, int *pathlen,
                                 u64 *ino, int *freepath)
{
        int r = 0;

        if (rinode) {
                r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
                dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
                     ceph_snap(rinode));
        } else if (rdentry) {
                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
        } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = rpath ? strlen(rpath) : 0;
                dout(" path %.*s\n", *pathlen, rpath);
        }

        return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
                                               struct ceph_mds_request *req,
                                               int mds)
{
        struct ceph_msg *msg;
        struct ceph_mds_request_head *head;
        const char *path1 = NULL;
        const char *path2 = NULL;
        u64 ino1 = 0, ino2 = 0;
        int pathlen1 = 0, pathlen2 = 0;
        int freepath1 = 0, freepath2 = 0;
        int len;
        u16 releases;
        void *p, *end;
        int ret;

        ret = set_request_path_attr(req->r_inode, req->r_dentry,
                                    req->r_path1, req->r_ino1.ino,
                                    &path1, &pathlen1, &ino1, &freepath1);
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out;
        }

        ret = set_request_path_attr(NULL, req->r_old_dentry,
                                    req->r_path2, req->r_ino2.ino,
                                    &path2, &pathlen2, &ino2, &freepath2);
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out_free1;
        }

        len = sizeof(*head) +
                pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));

        /* calculate (max) length for cap releases */
        len += sizeof(struct ceph_mds_request_release) *
                (!!req->r_inode_drop + !!req->r_dentry_drop +
                 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
        if (req->r_dentry_drop)
                len += req->r_dentry->d_name.len;
        if (req->r_old_dentry_drop)
                len += req->r_old_dentry->d_name.len;

        msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
        if (!msg) {
                msg = ERR_PTR(-ENOMEM);
                goto out_free2;
        }

        msg->hdr.tid = cpu_to_le64(req->r_tid);

        head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(*head);
        end = msg->front.iov_base + msg->front.iov_len;

        head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
        head->op = cpu_to_le32(req->r_op);
        head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
        head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
        head->args = req->r_args;

        ceph_encode_filepath(&p, end, ino1, path1);
        ceph_encode_filepath(&p, end, ino2, path2);

        /* make note of release offset, in case we need to replay */
        req->r_request_release_offset = p - msg->front.iov_base;

        /* cap releases */
        releases = 0;
        if (req->r_inode_drop)
                releases += ceph_encode_inode_release(&p,
                      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
                      mds, req->r_inode_drop, req->r_inode_unless, 0);
        if (req->r_dentry_drop)
                releases += ceph_encode_dentry_release(&p, req->r_dentry,
                       mds, req->r_dentry_drop, req->r_dentry_unless);
        if (req->r_old_dentry_drop)
                releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
                       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
        if (req->r_old_inode_drop)
                releases += ceph_encode_inode_release(&p,
                      req->r_old_dentry->d_inode,
                      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
        head->num_releases = cpu_to_le16(releases);

        BUG_ON(p > end);
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

        if (req->r_data_len) {
                /* outbound data set only by ceph_sync_setxattr() */
                BUG_ON(!req->r_pages);
                ceph_msg_data_add_pages(msg, req->r_pages, req->r_data_len, 0);
        }

        msg->hdr.data_len = cpu_to_le32(req->r_data_len);
        msg->hdr.data_off = cpu_to_le16(0);

out_free2:
        if (freepath2)
                kfree((char *)path2);
out_free1:
        if (freepath1)
                kfree((char *)path1);
out:
        return msg;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
                             struct ceph_mds_request *req)
{
        if (req->r_callback)
                req->r_callback(mdsc, req);
        else
                complete_all(&req->r_completion);
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
                                  struct ceph_mds_request *req,
                                  int mds)
{
        struct ceph_mds_request_head *rhead;
        struct ceph_msg *msg;
        int flags = 0;

        req->r_attempts++;
        if (req->r_inode) {
                struct ceph_cap *cap =
                        ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

                if (cap)
                        req->r_sent_on_mseq = cap->mseq;
                else
                        req->r_sent_on_mseq = -1;
        }
        dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
             req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

        if (req->r_got_unsafe) {
                /*
                 * Replay.  Do not regenerate message (and rebuild
                 * paths, etc.); just use the original message.
                 * Rebuilding paths will break for renames because
                 * d_move mangles the src name.
                 */
                msg = req->r_request;
                rhead = msg->front.iov_base;

                flags = le32_to_cpu(rhead->flags);
                flags |= CEPH_MDS_FLAG_REPLAY;
                rhead->flags = cpu_to_le32(flags);

                if (req->r_target_inode)
                        rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

                rhead->num_retry = req->r_attempts - 1;

                /* remove cap/dentry releases from message */
                rhead->num_releases = 0;
                msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
                msg->front.iov_len = req->r_request_release_offset;
                return 0;
        }

        if (req->r_request) {
                ceph_msg_put(req->r_request);
                req->r_request = NULL;
        }
        msg = create_request_message(mdsc, req, mds);
        if (IS_ERR(msg)) {
                req->r_err = PTR_ERR(msg);
                complete_request(mdsc, req);
                return PTR_ERR(msg);
        }
        req->r_request = msg;

        rhead = msg->front.iov_base;
        rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
        if (req->r_got_unsafe)
                flags |= CEPH_MDS_FLAG_REPLAY;
        if (req->r_locked_dir)
                flags |= CEPH_MDS_FLAG_WANT_DENTRY;
        rhead->flags = cpu_to_le32(flags);
        rhead->num_fwd = req->r_num_fwd;
        rhead->num_retry = req->r_attempts - 1;

        dout(" r_locked_dir = %p\n", req->r_locked_dir);
        return 0;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
{
        struct ceph_mds_session *session = NULL;
        int mds = -1;
        int err = -EAGAIN;

        if (req->r_err || req->r_got_result) {
                if (req->r_aborted)
                        __unregister_request(mdsc, req);
                goto out;
        }

        if (req->r_timeout &&
            time_after_eq(jiffies, req->r_started + req->r_timeout)) {
                dout("do_request timed out\n");
                err = -EIO;
                goto finish;
        }

        put_request_session(req);

        mds = __choose_mds(mdsc, req);
        if (mds < 0 ||
            ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
                dout("do_request no mds or not active, waiting for map\n");
                list_add(&req->r_wait, &mdsc->waiting_for_map);
                goto out;
        }

        /* get, open session */
        session = __ceph_lookup_mds_session(mdsc, mds);
        if (!session) {
                session = register_session(mdsc, mds);
                if (IS_ERR(session)) {
                        err = PTR_ERR(session);
                        goto finish;
                }
        }
        req->r_session = get_session(session);

        dout("do_request mds%d session %p state %s\n", mds, session,
             session_state_name(session->s_state));
        if (session->s_state != CEPH_MDS_SESSION_OPEN &&
            session->s_state != CEPH_MDS_SESSION_HUNG) {
                if (session->s_state == CEPH_MDS_SESSION_NEW ||
                    session->s_state == CEPH_MDS_SESSION_CLOSING)
                        __open_session(mdsc, session);
                list_add(&req->r_wait, &session->s_waiting);
                goto out_session;
        }

        /* send request */
        req->r_resend_mds = -1;   /* forget any previous mds hint */

        if (req->r_request_started == 0)   /* note request start time */
                req->r_request_started = jiffies;

        err = __prepare_send_request(mdsc, req, mds);
        if (!err) {
                ceph_msg_get(req->r_request);
                ceph_con_send(&session->s_con, req->r_request);
        }

out_session:
        ceph_put_mds_session(session);
out:
        return err;

finish:
        req->r_err = err;
        complete_request(mdsc, req);
        goto out;
}
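
/*
 * So a request leaving __do_request() is in one of three places: sent on
 * an open (or hung) session, parked on that session's s_waiting list
 * until the session opens, or on mdsc->waiting_for_map when no suitable
 * mds is known; __wake_requests() reruns it from those lists later.
 */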

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head)
{
        struct ceph_mds_request *req;
        LIST_HEAD(tmp_list);

        list_splice_init(head, &tmp_list);

        while (!list_empty(&tmp_list)) {
                req = list_entry(tmp_list.next,
                                 struct ceph_mds_request, r_wait);
                list_del_init(&req->r_wait);
                dout(" wake request %p tid %llu\n", req, req->r_tid);
                __do_request(mdsc, req);
        }
}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
        struct ceph_mds_request *req;
        struct rb_node *p;

        dout("kick_requests mds%d\n", mds);
        for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_mds_request, r_node);
                if (req->r_got_unsafe)
                        continue;
                if (req->r_session &&
                    req->r_session->s_mds == mds) {
                        dout(" kicking tid %llu\n", req->r_tid);
                        __do_request(mdsc, req);
                }
        }
}

void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
                              struct ceph_mds_request *req)
{
        dout("submit_request on %p\n", req);
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, NULL);
        __do_request(mdsc, req);
        mutex_unlock(&mdsc->mutex);
}

/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                         struct inode *dir,
                         struct ceph_mds_request *req)
{
        int err;

        dout("do_request on %p\n", req);

        /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
        if (req->r_inode)
                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
        if (req->r_locked_dir)
                ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
        if (req->r_old_dentry)
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);

        /* issue */
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, dir);
        __do_request(mdsc, req);

        if (req->r_err) {
                err = req->r_err;
                __unregister_request(mdsc, req);
                dout("do_request early error %d\n", err);
                goto out;
        }

        /* wait */
        mutex_unlock(&mdsc->mutex);
        dout("do_request waiting\n");
        if (req->r_timeout) {
                err = (long)wait_for_completion_killable_timeout(
                        &req->r_completion, req->r_timeout);
                if (err == 0)
                        err = -EIO;
        } else {
                err = wait_for_completion_killable(&req->r_completion);
        }
        dout("do_request waited, got %d\n", err);
        mutex_lock(&mdsc->mutex);

        /* only abort if we didn't race with a real reply */
        if (req->r_got_result) {
                err = le32_to_cpu(req->r_reply_info.head->result);
        } else if (err < 0) {
                dout("aborted request %lld with %d\n", req->r_tid, err);

                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                req->r_aborted = true;
                mutex_unlock(&req->r_fill_mutex);

                if (req->r_locked_dir &&
                    (req->r_op & CEPH_MDS_OP_WRITE))
                        ceph_invalidate_dir_request(req);
        } else {
                err = req->r_err;
        }

out:
        mutex_unlock(&mdsc->mutex);
        dout("do_request %p done, result %d\n", req, err);
        return err;
}

/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
        struct inode *inode = req->r_locked_dir;

        dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

        ceph_dir_clear_complete(inode);
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
        if (req->r_old_dentry)
                ceph_invalidate_dentry_lease(req->r_old_dentry);
}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct ceph_mds_request *req;
        struct ceph_mds_reply_head *head = msg->front.iov_base;
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
        u64 tid;
        int err, result;
        int mds = session->s_mds;

        if (msg->front.iov_len < sizeof(*head)) {
                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
                ceph_msg_dump(msg);
                return;
        }

        /* get request, session */
        tid = le64_to_cpu(msg->hdr.tid);
        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
                dout("handle_reply on unknown tid %llu\n", tid);
                mutex_unlock(&mdsc->mutex);
                return;
        }
        dout("handle_reply %p\n", req);

        /* correct session? */
        if (req->r_session != session) {
                pr_err("mdsc_handle_reply got %llu on session mds%d"
                       " not mds%d\n", tid, session->s_mds,
                       req->r_session ? req->r_session->s_mds : -1);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }

        /* dup? */
        if ((req->r_got_unsafe && !head->safe) ||
            (req->r_got_safe && head->safe)) {
                pr_warning("got a dup %s reply on %llu from mds%d\n",
                           head->safe ? "safe" : "unsafe", tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
        if (req->r_got_safe && !head->safe) {
                pr_warning("got unsafe after safe on %llu from mds%d\n",
                           tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }

        result = le32_to_cpu(head->result);

        /*
         * Handle an ESTALE
         * if we're not talking to the authority, send to them
         * if the authority has changed while we weren't looking,
         * send to new authority
         * Otherwise we just have to return an ESTALE
         */
        if (result == -ESTALE) {
                dout("got ESTALE on request %llu", req->r_tid);
                if (!req->r_inode) {
                        /* do nothing; not an authority problem */
                } else if (req->r_direct_mode != USE_AUTH_MDS) {
                        dout("not using auth, setting for that now");
                        req->r_direct_mode = USE_AUTH_MDS;
                        __do_request(mdsc, req);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                } else {
                        struct ceph_inode_info *ci = ceph_inode(req->r_inode);
                        struct ceph_cap *cap = NULL;

                        if (req->r_session)
                                cap = ceph_get_cap_for_mds(ci,
                                                   req->r_session->s_mds);

                        dout("already using auth");
                        if ((!cap || cap != ci->i_auth_cap) ||
                            (cap->mseq != req->r_sent_on_mseq)) {
                                dout("but cap changed, so resending");
                                __do_request(mdsc, req);
                                mutex_unlock(&mdsc->mutex);
                                goto out;
                        }
                }
                dout("have to return ESTALE on request %llu", req->r_tid);
        }

        if (head->safe) {
                req->r_got_safe = true;
                __unregister_request(mdsc, req);

                if (req->r_got_unsafe) {
                        /*
                         * We already handled the unsafe response, now do the
                         * cleanup.  No need to examine the response; the MDS
                         * doesn't include any result info in the safe
                         * response.  And even if it did, there is nothing
                         * useful we could do with a revised return value.
                         */
                        dout("got safe reply %llu, mds%d\n", tid, mds);
                        list_del_init(&req->r_unsafe_item);

                        /* last unsafe request during umount? */
                        if (mdsc->stopping && !__get_oldest_req(mdsc))
                                complete_all(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
        } else {
                req->r_got_unsafe = true;
                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
        }

        dout("handle_reply tid %lld result %d\n", tid, result);
        rinfo = &req->r_reply_info;
        err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
        mutex_unlock(&mdsc->mutex);

        mutex_lock(&session->s_mutex);
        if (err < 0) {
                pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
                ceph_msg_dump(msg);
                goto out_err;
        }

        /* snap trace */
        if (rinfo->snapblob_len) {
                down_write(&mdsc->snap_rwsem);
                ceph_update_snap_trace(mdsc, rinfo->snapblob,
                               rinfo->snapblob + rinfo->snapblob_len,
                               le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
                downgrade_write(&mdsc->snap_rwsem);
        } else {
                down_read(&mdsc->snap_rwsem);
        }

        /* insert trace into our cache */
        mutex_lock(&req->r_fill_mutex);
        err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
                                    req->r_op == CEPH_MDS_OP_LSSNAP) &&
                    rinfo->dir_nr)
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
        }
        mutex_unlock(&req->r_fill_mutex);

        up_read(&mdsc->snap_rwsem);
out_err:
        mutex_lock(&mdsc->mutex);
        if (!req->r_aborted) {
                if (err) {
                        req->r_err = err;
                } else {
                        req->r_reply = msg;
                        ceph_msg_get(msg);
                        req->r_got_result = true;
                }
        } else {
                dout("reply arrived after request %lld was aborted\n", tid);
        }
        mutex_unlock(&mdsc->mutex);

        ceph_add_cap_releases(mdsc, req->r_session);
        mutex_unlock(&session->s_mutex);

        /* kick calling process */
        complete_request(mdsc, req);
out:
        ceph_mdsc_put_request(req);
        return;
}
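
/*
 * Locking in handle_reply(): mdsc->mutex covers request lookup, dup
 * detection, and parsing; the parsed result is then applied to the cache
 * under the session's s_mutex, which preserves the per-session ordering
 * of replies and cap messages described above.
 */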

/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session,
                           struct ceph_msg *msg)
{
        struct ceph_mds_request *req;
        u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 next_mds;
        u32 fwd_seq;
        int err = -EINVAL;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;

        ceph_decode_need(&p, end, 2*sizeof(u32), bad);
        next_mds = ceph_decode_32(&p);
        fwd_seq = ceph_decode_32(&p);

        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
                dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
                goto out;  /* dup reply? */
        }

        if (req->r_aborted) {
                dout("forward tid %llu aborted, unregistering\n", tid);
                __unregister_request(mdsc, req);
        } else if (fwd_seq <= req->r_num_fwd) {
                dout("forward tid %llu to mds%d - old seq %d <= %d\n",
                     tid, next_mds, req->r_num_fwd, fwd_seq);
        } else {
                /* resend. forward race not possible; mds would drop */
                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
                BUG_ON(req->r_err);
                BUG_ON(req->r_got_result);
                req->r_num_fwd = fwd_seq;
                req->r_resend_mds = next_mds;
                put_request_session(req);
                __do_request(mdsc, req);
        }
        ceph_mdsc_put_request(req);
out:
        mutex_unlock(&mdsc->mutex);
        return;

bad:
        pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		remove_session_caps(session);
		wake = 1; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		kick_requests(mdsc, mds);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}
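
/*
 * Illustrative sketch, not part of the original file: bumping s_cap_gen in
 * the STALE case above is what invalidates everything issued under the old
 * generation; validity checks elsewhere compare generations under
 * s_gen_ttl_lock, roughly like this hypothetical helper:
 */
#if 0
static bool cap_gen_still_valid(struct ceph_mds_session *s, u32 gen)
{
	bool valid;

	spin_lock(&s->s_gen_ttl_lock);
	valid = (gen == s->s_cap_gen) && time_before(jiffies, s->s_cap_ttl);
	spin_unlock(&s->s_gen_ttl_lock);
	return valid;
}
#endif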
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen;
	u64 pathbase;
	struct dentry *dentry;
	int err;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;

encode_again:
		spin_lock(&inode->i_lock);
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		spin_unlock(&inode->i_lock);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		spin_lock(&inode->i_lock);
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		spin_unlock(&inode->i_lock);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
				    (num_fcntl_locks+num_flock_locks) *
				    sizeof(struct ceph_filelock));
		err = ceph_pagelist_append(pagelist, &rec, reclen);
		if (!err)
			err = ceph_locks_to_pagelist(flocks, pagelist,
						     num_fcntl_locks,
						     num_flock_locks);
		kfree(flocks);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
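
/*
 * Illustrative sketch, not part of the original file: the reconnect record
 * above is a v1 or v2 struct depending on whether the peer advertises
 * CEPH_FEATURE_FLOCK, and reclen alone decides how many bytes of the union
 * reach the pagelist.  (reconnect_record_len is a hypothetical helper.)
 */
#if 0
static size_t reconnect_record_len(bool peer_has_flock)
{
	return peer_has_flock ? sizeof(struct ceph_mds_cap_reconnect)
			      : sizeof(struct ceph_mds_cap_reconnect_v1);
}
#endif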
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	dout("session %p state %s\n", session,
	     session_state_name(session->s_state));

	/* drop old cap expires; we're about to reestablish that state */
	discard_cap_releases(mdsc, session);

	/* traverse this session's caps */
	err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
	if (err)
		goto fail;

	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);
	if (pagelist->length) {
		/* set up outbound data if we have any */
		reply->hdr.data_len = cpu_to_le32(pagelist->length);
		ceph_msg_data_add_pagelist(reply, pagelist);
	}
	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
	kfree(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
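
/*
 * Illustrative sketch, not part of the original file: the reconnect payload
 * assembled above is, in order, a cap count, one (path string + cap record,
 * plus lock data for v2) entry per cap, then one snaprealm record per known
 * realm:
 */
#if 0
/* conceptual layout only; this is not a real wire struct */
struct reconnect_payload_shape {
	__le32 nr_caps;
	/* nr_caps x { string path; cap reconnect record [+ flock data] } */
	/* followed by struct ceph_mds_snaprealm_reconnect, one per realm */
};
#endif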
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}

			/* kick any requests waiting on the recovering mds */
			kick_requests(mdsc, i);
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick requests on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n",
					s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
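
/*
 * Illustrative sketch, not part of the original file: "did mds i move?" in
 * check_new_map() above is an index-bounds check plus an entity address
 * comparison between the two maps.  (mds_addr_changed is hypothetical.)
 */
#if 0
static bool mds_addr_changed(struct ceph_mdsmap *oldmap,
			     struct ceph_mdsmap *newmap, int i)
{
	return i >= newmap->m_max_mds ||
	       memcmp(ceph_mdsmap_get_addr(oldmap, i),
		      ceph_mdsmap_get_addr(newmap, i),
		      sizeof(struct ceph_entity_addr)) != 0;
}
#endif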
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);
	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
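
/*
 * Illustrative note, not part of the original file: ceph_seq_cmp() used in
 * the REVOKE case above is a wraparound-safe comparison, essentially a
 * signed difference of the two 32-bit sequence numbers.  A sketch of the
 * idea (seq_cmp_sketch is hypothetical):
 */
#if 0
/* > 0 when a is newer than b, even across u32 wraparound */
static inline int seq_cmp_sketch(u32 a, u32 b)
{
	return (s32)a - (s32)b;
}
#endif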
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
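
/*
 * Illustrative sketch, not part of the original file: the lease message
 * built above is a fixed header followed by a length-prefixed dentry name,
 * which is why len starts at sizeof(*lease) + sizeof(u32) and then grows
 * by dnamelen.  (lease_msg_len is a hypothetical helper.)
 */
#if 0
static int lease_msg_len(const struct dentry *dentry)
{
	/* header + __le32 name length + name bytes */
	return sizeof(struct ceph_mds_lease) + sizeof(u32) +
	       dentry->d_name.len;
}
#endif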
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
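
/*
 * Illustrative note, not part of the original file: drop_leases() uses a
 * lock-cycling idiom common in this file -- release mdsc->mutex before
 * taking a session mutex (preserving lock order), then retake it to resume
 * the scan:
 *
 *	mutex_unlock(&mdsc->mutex);
 *	mutex_lock(&s->s_mutex);	sync with in-flight session work
 *	mutex_unlock(&s->s_mutex);
 *	mutex_lock(&mdsc->mutex);	continue iterating sessions
 */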
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		ceph_add_cap_releases(mdsc, s);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
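
/*
 * Illustrative note, not part of the original file: caps are renewed at a
 * quarter of the mdsmap session timeout.  With a 60s session timeout, for
 * example, renew_interval is 60 >> 2 = 15, so renewal triggers once jiffies
 * pass last_renew_caps + 15*HZ.  (need_renew is a hypothetical helper.)
 */
#if 0
static bool need_renew(unsigned long last_renew_caps, int session_timeout)
{
	return time_after_eq(jiffies,
			     last_renew_caps + HZ * (session_timeout >> 2));
}
#endif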
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_fs_client *fsc = mdsc->fsc;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    fsc->client->options->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
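
/*
 * Illustrative note, not part of the original file: the loop above pins
 * both the current and the next request before dropping mdsc->mutex, so
 * neither rbtree node can be freed while we sleep; RB_EMPTY_NODE() then
 * detects whether "next" was unregistered in the meantime, forcing a
 * restart of the scan:
 *
 *	ceph_mdsc_get_request(req);		pin current
 *	if (nextreq)
 *		ceph_mdsc_get_request(nextreq);	pin successor too
 *	mutex_unlock(&mdsc->mutex);		now safe to sleep
 */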
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	int i, n = 0;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return true;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++)
		if (mdsc->sessions[i])
			n++;
	mutex_unlock(&mdsc->mutex);
	return n == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	struct ceph_fs_client *fsc = mdsc->fsc;
	unsigned long timeout = fsc->client->options->mount_timeout * HZ;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   timeout);

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
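
/*
 * Illustrative sketch, not part of the original file: mdsmap updates are
 * strictly ordered by epoch, so the handler above ignores anything that is
 * not newer than the map we already hold.  (mdsmap_is_newer is a
 * hypothetical helper.)
 */
#if 0
static bool mdsmap_is_newer(struct ceph_mds_client *mdsc, u32 epoch)
{
	return mdsc->mdsmap == NULL || epoch > mdsc->mdsmap->m_epoch;
}
#endif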
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warning("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
};
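
/*
 * Illustrative note, not part of the original file: this ops table is
 * attached to each session's connection when the session is registered
 * earlier in this file, along the lines of:
 *
 *	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
 *
 * after which the messenger invokes dispatch(), peer_reset(), and the
 * authorizer hooks above on behalf of that session.
 */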