#include <linux/ceph/ceph_debug.h>

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "mds_client.h"

#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
	struct ceph_pagelist *pagelist;
	bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		*p += info->dname_len;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
	}

	if (unlikely(*p != end))
		goto bad;

	pr_err("problem parsing mds trace %d\n", err);
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);

	/* alloc large array */
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {

	}
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		err = parse_reply_info_in(p, end, &info->dir_in[i], features);

	pr_err("problem parsing dir contents %d\n", err);
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	return parse_reply_info_dir(p, end, info, features);
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);
	err = parse_reply_info_trace(&p, p+len, info, features);

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);
	err = parse_reply_info_extra(&p, p+len, info, features);

	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;

	pr_err("mds parse_reply err %d\n", err);
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
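/*
 * human-readable name for a session state, for debug and log output
 */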
static const char *session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}
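/*
 * Grab an extra reference on a session, unless its refcount has
 * already hit zero (i.e. it is being torn down).
 */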
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
	}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
			s->s_mdsc->fsc->client->monc.auth,
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
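/*
 * Do we already have a session registered for this mds?
 * Called under mdsc->mutex.
 */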
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
	s->s_con.private = s;
	s->s_con.ops = &mds_con_ops;
	s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
	s->s_con.peer_name.num = cpu_to_le64(mds);

	spin_lock_init(&s->s_gen_ttl_lock);
	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	INIT_LIST_HEAD(&s->s_caps);
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return ERR_PTR(-ENOMEM);
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
}
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_msg_put(req->r_request);
	ceph_msg_put(req->r_reply);
	destroy_reply_info(&req->r_reply_info);
	ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_target_inode)
		iput(req->r_target_inode);
	if (req->r_old_dentry) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		dput(req->r_old_dentry);
		iput(req->r_old_dentry_dir);
	}
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
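/*
 * Insert a request into the mdsc request tree (an rbtree keyed by tid).
 */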
static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	ceph_reserve_caps(mdsc, &req->r_caps_reservation,
			  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}
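/*
 * Remove a request from the tid tree and drop the tree's reference.
 */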
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);

		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = parent->d_inode;

		if (dir->i_sb != mdsc->fsc->sb) {
			inode = req->r_dentry->d_inode;
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else if (req->r_dentry->d_inode) {
			inode = req->r_dentry->d_inode;
		}

		hash = ceph_dentry_hash(dir, req->r_dentry);
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     hash, mode);
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;

		ceph_choose_frag(ci, hash, &frag, &found);

		if (mode == USE_ANY_MDS && frag.ndist > 0) {
			/* choose a random replica */
			get_random_bytes(&r, 1);
			dout("choose_mds %p %llx.%llx "
			     "frag %u mds%d (%d/%d)\n",
			     inode, ceph_vinop(inode),
			     frag.frag, mds, (int)r, frag.ndist);
			if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
			    CEPH_MDS_STATE_ACTIVE)
				return mds;
		}

		/* since this file/dir wasn't known to be
		 * replicated, then we want to look for the
		 * authoritative mds. */

		/* choose auth mds */
		dout("choose_mds %p %llx.%llx "
		     "frag %u mds%d (auth)\n",
		     inode, ceph_vinop(inode), frag.frag, mds);
		if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
		    CEPH_MDS_STATE_ACTIVE)
			return mds;
	}

	spin_lock(&ci->i_ceph_lock);
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);

	spin_unlock(&ci->i_ceph_lock);

	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
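/*
 * Build a bare session message carrying just an op code and sequence number.
 */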
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);
	return msg;
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
	ceph_con_send(&session->s_con, msg);
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;
	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		target = mi->export_targets[i];
		ts = __ceph_lookup_mds_session(mdsc, target);
		if (!ts)
			ts = register_session(mdsc, target);

		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		else
			dout(" mds%d target mds%d %p is %s\n", session->s_mds,
			     i, ts, session_state_name(ts->s_state));
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
	}
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
	}
	spin_unlock(&session->s_cap_lock);
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);

		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		ceph_put_cap(session->s_mdsc, old_cap);

		ret = cb(inode, cap, arg);

		spin_lock(&session->s_cap_lock);
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			old_cap = cap;  /* put_cap it w/o locks held */
		}
	}

	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	ceph_put_cap(session->s_mdsc, old_cap);
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap);
	if (!__ceph_is_any_real_caps(ci)) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			pr_info(" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_info(" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
		}
		if (drop && ci->i_wrbuffer_ref) {
			pr_info(" dropping dirty data for %p %lld\n",
				inode, ceph_ino(inode));
			ci->i_wrbuffer_ref = 0;
			ci->i_wrbuffer_ref_head = 0;
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&ci->i_ceph_lock);
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	cleanup_cap_releases(session);
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);

	spin_lock(&ci->i_ceph_lock);
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;
	spin_unlock(&ci->i_ceph_lock);

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	ceph_con_send(&session->s_con, msg);
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (time_before(jiffies, session->s_cap_ttl)) {
		pr_info("mds%d caps renewed\n", session->s_mds);
	} else {
		pr_info("mds%d caps still stale\n", session->s_mds);
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	wake_up_session_caps(session, 0);
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	ceph_con_send(&session->s_con, msg);
/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;	/* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;	/* we need these caps */

	session->s_trim_caps--;

	/* we aren't the only cap.. just remove us */
	__ceph_remove_cap(cap);

	/* try to drop referring dentries */
	spin_unlock(&ci->i_ceph_lock);
	d_prune_aliases(inode);
	dout("trim_caps_cb %p cap %p pruned, count now %d\n",
	     inode, cap, atomic_read(&inode->i_count));

out:
	spin_unlock(&ci->i_ceph_lock);
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}
/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg, *partial = NULL;
	struct ceph_mds_cap_release *head;
	int extra = mdsc->fsc->mount_options->cap_release_safety;

	dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
	     extra);

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		dout(" partial %p with (%d/%d)\n", msg, num,
		     (int)CEPH_CAPS_PER_RELEASE);
		extra += CEPH_CAPS_PER_RELEASE - num;
	}

	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   GFP_NOFS, false);
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	head = partial->front.iov_base;
	num = le32_to_cpu(head->num);
	dout(" queueing partial %p with %d/%d\n", partial, num,
	     (int)CEPH_CAPS_PER_RELEASE);
	list_move_tail(&partial->list_head,
		       &session->s_cap_releases_done);
	session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;

	spin_unlock(&session->s_cap_lock);
/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&ci->i_ceph_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
			}
			spin_unlock(&ci->i_ceph_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
		spin_lock(&session->s_cap_lock);
	}
	spin_unlock(&session->s_cap_lock);
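/*
 * Discard queued cap releases: zero out the partially built message and
 * requeue any completed release messages so nothing is sent.
 */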
static void discard_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;

	dout("discard_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);

	/* zero out the in-progress message */
	msg = list_first_entry(&session->s_cap_releases,
			       struct ceph_msg, list_head);
	head = msg->front.iov_base;
	num = le32_to_cpu(head->num);
	dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
	head->num = cpu_to_le32(0);
	session->s_num_cap_releases += num;

	/* requeue completed messages */
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);

		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
		     num);
		session->s_num_cap_releases += num;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		list_add(&msg->list_head, &session->s_cap_releases);
	}

	spin_unlock(&session->s_cap_lock);
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	req->r_direct_mode = mode;
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}
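/*
 * tid of the oldest registered request, or 0 if there are none.
 */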
static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req = __get_oldest_req(mdsc);

	if (req)
		return req->r_tid;
	return 0;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

	seq = read_seqbegin(&rename_lock);
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}

	pr_err("build_path corrupt dentry %p\n", dentry);
	return ERR_PTR(-EINVAL);

	len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);

	path[pos] = 0;	/* trailing null */

	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			spin_unlock(&temp->d_lock);
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		temp = temp->d_parent;
	}

	pr_err("build_path corrupt dentry\n");
	return ERR_PTR(-EINVAL);

	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
	}

	*base = ceph_ino(temp->d_inode);
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, dentry->d_count, *base, len, path);
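/*
 * Build an ino+path pair for a dentry: just the name when the parent is
 * not snapped, otherwise a full path from the nearest non-snapped ancestor.
 */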
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
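/*
 * Build an ino+path pair for an inode: the ino alone if it is not snapped,
 * otherwise a path built from one of its dentry aliases.
 */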
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*pathlen = strlen(rpath);
		dout(" path %.*s\n", *pathlen, rpath);
	}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
	}

	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(req->r_uid);
	head->caller_gid = cpu_to_le32(req->r_gid);
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_old_dentry->d_inode,
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
	head->num_releases = cpu_to_le16(releases);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	msg->pages = req->r_pages;
	msg->nr_pages = req->r_num_pages;
	msg->hdr.data_len = cpu_to_le32(req->r_data_len);
	msg->hdr.data_off = cpu_to_le16(0);

	kfree((char *)path2);

	kfree((char *)path1);
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
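/*
 * Build (or, for a replay, reuse) the request message for the given mds.
 */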
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;

	struct ceph_cap *cap =
		ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

	if (cap)
		req->r_sent_on_mseq = cap->mseq;
	else
		req->r_sent_on_mseq = -1;

	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;
		msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
		msg->front.iov_len = req->r_request_release_offset;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		complete_request(mdsc, req);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;

	if (req->r_err || req->r_got_result)
		goto out;

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
	}

	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

	ceph_put_mds_session(session);
out:

	complete_request(mdsc, req);
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req, *nreq;

	list_for_each_entry_safe(req, nreq, head, r_wait) {
		list_del_init(&req->r_wait);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;

	dout("kick_requests mds%d\n", mds);
	for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		if (req->r_got_unsafe)
			continue;
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			__do_request(mdsc, req);
		}
	}
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	__unregister_request(mdsc, req);
	dout("do_request early error %d\n", err);

	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (req->r_timeout) {
		err = (long)wait_for_completion_killable_timeout(
			&req->r_completion, req->r_timeout);
	} else {
		err = wait_for_completion_killable(&req->r_completion);
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	}

	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
/*
 * Invalidate dir D_COMPLETE, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;
	struct ceph_inode_info *ci = ceph_inode(inode);

	dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
	spin_lock(&ci->i_ceph_lock);
	ceph_dir_clear_complete(inode);
	ci->i_release_count++;
	spin_unlock(&ci->i_ceph_lock);

	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warning("got a dup %s reply on %llu from mds%d\n",
			   head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe && !head->safe) {
		pr_warning("got unsafe after safe on %llu from mds%d\n",
			   tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		if (!req->r_inode) {
			/* do nothing; not an authority problem */
		} else if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			struct ceph_inode_info *ci = ceph_inode(req->r_inode);
			struct ceph_cap *cap = NULL;

			cap = ceph_get_cap_for_mds(ci,
						   req->r_session->s_mds);

			dout("already using auth");
			if ((!cap || cap != ci->i_auth_cap) ||
			    (cap->mseq != req->r_sent_on_mseq)) {
				dout("but cap changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);
		complete_all(&req->r_safe_completion);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
	}

	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
			       rinfo->snapblob + rinfo->snapblob_len,
			       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);

	if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
	    rinfo->dir_nr)
		ceph_readdir_prepopulate(req, req->r_session);
	ceph_unreserve_caps(mdsc, &req->r_caps_reservation);

	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);

	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {

		req->r_got_result = true;
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	ceph_add_cap_releases(mdsc, req->r_session);
	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_got_result);
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		remove_session_caps(session);
		wake = 1; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		kick_requests(mdsc, mds);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = 0;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
}
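/*
 * Summary of handle_session() above: the MDS drives our session state with
 * a small set of ops read from struct ceph_mds_session_head (op, seq, and
 * for RECALL_STATE a max_caps hint):
 *
 *   OPEN         -> session becomes OPEN, caps are renewed
 *   RENEWCAPS    -> ack for our renew request (matched against s_renew_seq)
 *   CLOSE        -> session unregistered, caps dropped, waiters woken
 *   STALE        -> bump s_cap_gen, zero s_cap_ttl, re-send a renew
 *   RECALL_STATE -> trim our cap count down to h->max_caps
 */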
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_pagelist_cursor trunc_point;

		ceph_pagelist_set_cursor(pagelist, &trunc_point);
		do {
			ceph_count_locks(inode, &num_fcntl_locks,
					 &num_flock_locks);
			rec.v2.flock_len = (2*sizeof(u32) +
					    (num_fcntl_locks+num_flock_locks) *
					    sizeof(struct ceph_filelock));

			/* pre-alloc pagelist */
			ceph_pagelist_truncate(pagelist, &trunc_point);
			err = ceph_pagelist_append(pagelist, &rec, reclen);
			if (!err)
				err = ceph_pagelist_reserve(pagelist,
							    rec.v2.flock_len);

			/* encode locks */
			if (!err)
				err = ceph_encode_locks(inode, pagelist,
							num_fcntl_locks,
							num_flock_locks);
		} while (err == -ENOSPC);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
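/*
 * Note on encode_caps_cb() above: when the peer supports CEPH_FEATURE_FLOCK
 * we emit the v2 cap record followed by a block of file locks; because the
 * locks can change between counting and encoding them, the code snapshots a
 * pagelist cursor and retries the truncate/append/encode sequence until
 * ceph_encode_locks() no longer returns -ENOSPC.  Without flock support the
 * older v1 record (which also carries size/mtime/atime) is appended on its
 * own.
 */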
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;

	ceph_con_open(&session->s_con,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	dout("session %p state %s\n", session,
	     session_state_name(session->s_state));

	/* drop old cap expires; we're about to reestablish that state */
	discard_cap_releases(mdsc, session);

	/* traverse this session's caps */
	err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
	if (err)
		goto fail;

	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	reply->pagelist = pagelist;
	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);
	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	reply->nr_pages = calc_pages_for(0, pagelist->length);
	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
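/*
 * Layout of the reconnect message built by send_mds_reconnect() above, as
 * assembled into the pagelist: a 32-bit count of caps held on this session,
 * then one entry per cap (ino, path string, and the cap record produced by
 * encode_caps_cb()), then one ceph_mds_snaprealm_reconnect record (ino, seq,
 * parent) per snap realm we know about.  hdr.version is bumped to 2 when
 * the flock-capable v2 cap records are used.
 */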
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     session_state_name(s->s_state));

		if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}

			/* kick any requests waiting on the recovering mds */
			kick_requests(mdsc, i);
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
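/*
 * Note on check_new_map() above: an address change closes (or unregisters,
 * if still OPENING) the old session and kicks its requests; a session left
 * in RESTARTING reconnects once the new map shows that MDS at or beyond the
 * RECONNECT state; and any MDS that transitions to ACTIVE gets its waiting
 * requests and flushing caps kicked.  The second loop opens sessions to the
 * export targets of MDSs that the new map marks laggy.
 */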
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	mutex_lock(&session->s_mutex);

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);
	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
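/*
 * Wire format used by ceph_mdsc_lease_send_msg() above: a struct
 * ceph_mds_lease header (action, ino, first/last snap, seq) followed by a
 * 32-bit dentry name length and the name bytes themselves, which is why the
 * message is sized as sizeof(*lease) + sizeof(u32) + dname.len.
 */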
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);

	schedule_delayed_work(&mdsc->delayed_work, hz);
}
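/*
 * schedule_delayed() above re-arms the mdsc workqueue tick (the delay is
 * rounded with round_jiffies_relative() so wakeups batch with other timers).
 * delayed_work() below is the body of that tick: it flushes delayed caps,
 * decides whether this pass should renew caps (roughly every quarter of the
 * mdsmap session timeout), and then walks every session.
 */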
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		ceph_add_cap_releases(mdsc, s);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL)
		return -ENOMEM;

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	mdsc->max_sessions = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_fs_client *fsc = mdsc->fsc;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    fsc->client->options->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return;

	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
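/*
 * Ordering note for ceph_mdsc_sync() above: we snapshot last_tid and
 * cap_flush_seq under mdsc->mutex, flush dirty caps, then wait first for
 * every write request up to want_tid to receive its safe reply and finally
 * for the cap flush sequence to catch up to want_flush.  Metadata activity
 * started after the snapshot is deliberately not waited for.
 */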
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	int i, n = 0;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return true;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++)
		if (mdsc->sessions[i])
			n++;
	mutex_unlock(&mdsc->mutex);
	return n == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	struct ceph_fs_client *fsc = mdsc->fsc;
	unsigned long timeout = fsc->client->options->mount_timeout * HZ;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   timeout);

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
}
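/*
 * Shutdown in ceph_mdsc_close_sessions() above is two-phase: ask every
 * session to close and wait (bounded by the mount_timeout option) on
 * session_close_wq for done_closing_sessions(), then forcibly unregister
 * and strip caps from anything still left before cancelling the delayed
 * work timer.
 */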
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
}
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warning("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	int ret = 0;

	if (force_new && s->s_authorizer) {
		ac->ops->destroy_authorizer(ac, s->s_authorizer);
		s->s_authorizer = NULL;
	}
	if (s->s_authorizer == NULL) {
		if (ac->ops->create_authorizer) {
			ret = ac->ops->create_authorizer(
				ac, CEPH_ENTITY_TYPE_MDS,
				&s->s_authorizer,
				&s->s_authorizer_buf,
				&s->s_authorizer_buf_len,
				&s->s_authorizer_reply_buf,
				&s->s_authorizer_reply_buf_len);
			if (ret)
				return ret;
		}
	}

	*proto = ac->protocol;
	*buf = s->s_authorizer_buf;
	*len = s->s_authorizer_buf_len;
	*reply_buf = s->s_authorizer_reply_buf;
	*reply_len = s->s_authorizer_reply_buf_len;
	return 0;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	if (ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
};
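/*
 * mds_con_ops is the glue between the generic messenger and the MDS client:
 * con->private points at the owning ceph_mds_session, get/put pin the
 * session while a message is in flight, dispatch() fans incoming messages
 * out to the handlers above, and the authorizer hooks let the messenger
 * authenticate the connection via the auth client.  The table is attached
 * to each session's ceph_connection when the session is registered
 * elsewhere in this file; a sketch, assuming the setup looks roughly like:
 *
 *	s->s_con.private = s;
 *	s->s_con.ops = &mds_con_ops;
 *
 * The exact registration code is not part of this excerpt.
 */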