#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;
/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		ceph_decode_need(p, end, info->pool_ns_len, bad);
		*p += info->pool_ns_len;
	} else {
		info->pool_ns_len = 0;
	}

	return 0;
bad:
	return err;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_in);
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);
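	/*
	 * Illustration (not in the original source) of the readdir reply
	 * buffer carved up above -- dir_buf_size bytes, allocated in
	 * ceph_alloc_readdir_reply_buffer():
	 *
	 *   dir_in[0..num-1]         parsed inode info
	 *   dir_dname[0..num-1]      pointers to dentry names in the frame
	 *   dir_dname_len[0..num-1]  dentry name lengths
	 *   dir_dlease[0..num-1]     pointers to lease structs in the frame
	 *
	 * The check below guards against the MDS returning more entries
	 * than this preallocated buffer can hold.
	 */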
	if ((unsigned long)(info->dir_dlease + num) >
	    (unsigned long)info->dir_in + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_READDIR ||
		 info->head->op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
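/*
 * For reference (a sketch inferred from the decoding above, not wire
 * documentation): the CEPH_MSG_CLIENT_REPLY front payload is framed as
 *
 *   struct ceph_mds_reply_head
 *   u32 len, trace blob      -> parse_reply_info_trace()
 *   u32 len, extra blob      -> parse_reply_info_extra()
 *   u32 len, snap blob       -> kept in info->snapblob for later decoding
 */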
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_in)
		return;
	free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
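		/*
		 * e.g. registering mds 5 sizes the array to the next
		 * power of two above mds+1: 1 << get_count_order(6) == 8.
		 */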
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
}

/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}

static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = d_inode(parent);

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = d_inode(req->r_dentry);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = d_inode(dn);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			}
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? opt->name : ""},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
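/*
 * For illustration (not part of the original source): with the default
 * metadata table above, the serialized map looks like
 *
 *   u32 3                                   entry count
 *   u32 8  "hostname"        u32 n  "<utsname()->nodename>"
 *   u32 14 "kernel_version"  u32 n  "<utsname()->release>"
 *   u32 9  "entity_id"       u32 n  "<opt->name or empty>"
 *
 * i.e. one pair of length-prefixed strings per entry, matching the
 * userspace map<string, string> encoding.
 */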
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/*
 * session caps
 */

/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		list_del_init(&req->r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		while (true) {
			struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
			if (!n)
				break;
			cf = rb_entry(n, struct ceph_cap_flush, i_node);
			rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
			list_add(&cf->list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, list)
			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, list);
		list_del(&cf->list);
		ceph_free_cap_flush(cf);
	}
	while (drop--)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct super_block *sb = session->s_mdsc->fsc->sb;
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}
static int check_capsnap_flush(struct ceph_inode_info *ci,
			       u64 want_snap_seq)
{
	int ret = 1;
	spin_lock(&ci->i_ceph_lock);
	if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		ret = capsnap->follows >= want_snap_seq;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct rb_node *n;
	struct ceph_cap_flush *cf;
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	n = rb_first(&mdsc->cap_flush_tree);
	cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
	if (cf && cf->tid <= want_flush_tid) {
		dout("check_caps_flush still flushing tid %llu <= %llu\n",
		     cf->tid, want_flush_tid);
		ret = 0;
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * waits until we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid, u64 want_snap_seq)
{
	int mds;

	dout("check_caps_flush want %llu snap want %llu\n",
	     want_flush_tid, want_snap_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ) {
		struct ceph_mds_session *session = mdsc->sessions[mds];
		struct inode *inode = NULL;

		if (!session) {
			mds++;
			continue;
		}
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_snaps_flushing)) {
			struct ceph_cap_snap *capsnap =
				list_first_entry(&session->s_cap_snaps_flushing,
						 struct ceph_cap_snap,
						 flushing_item);
			struct ceph_inode_info *ci = capsnap->ci;
			if (!check_capsnap_flush(ci, want_snap_seq)) {
				dout("check_cap_flush still flushing snap %p "
				     "follows %lld <= %lld to mds%d\n",
				     &ci->vfs_inode, capsnap->follows,
				     want_snap_seq, mds);
				inode = igrab(&ci->vfs_inode);
			}
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (inode) {
			wait_event(mdsc->cap_flushing_wq,
				   check_capsnap_flush(ceph_inode(inode),
						       want_snap_seq));
			iput(inode);
		} else {
			mds++;
		}

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);
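		/*
		 * Message front layout at this point (a sketch):
		 *   struct ceph_mds_cap_release head;   (head->num items)
		 *   struct ceph_mds_cap_item item[head->num];
		 * The message is flushed each time head->num reaches
		 * CEPH_CAPS_PER_RELEASE.
		 */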
		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
/*
 * requests
 */

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
		      sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
							__GFP_NOWARN,
							order);
		if (rinfo->dir_in)
			break;
		order--;
	}
	if (!rinfo->dir_in)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = current_fs_time(mdsc->fsc->sb);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
		*pino = ceph_ino(d_inode(dentry->d_parent));
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		atomic_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
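		/*
		 * Replay note (illustration): p now points at the release
		 * offset recorded by create_request_message(); the
		 * timestamp is re-encoded there, overwriting the stale cap
		 * releases, and front.iov_len below shrinks to match, so
		 * the replayed message carries no releases.
		 */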
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return err;
}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}

void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* deny access to directories with pool_ns layouts */
	if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
	    ceph_inode(req->r_inode)->i_pool_ns_len)
		return -EIO;
	if (req->r_locked_dir &&
	    ceph_inode(req->r_locked_dir)->i_pool_ns_len)
		return -EIO;

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
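/*
 * Typical usage (an illustrative sketch modeled on callers elsewhere in
 * fs/ceph, e.g. the lookup path in dir.c; the surrounding variable names
 * are assumed):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return ERR_CAST(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);	/* drops our ref */
 *
 * The call blocks until the MDS replies, the per-request timeout
 * expires, or the calling process is killed.
 */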
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				       rinfo->snapblob + rinfo->snapblob_len,
				       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				       &realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
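/*
 * A rough timeline of the two-phase reply protocol handled above, for
 * orientation (a sketch, not wire-exact):
 *
 *	client                            mds
 *	  |---- request (tid N) ----------->|
 *	  |<--- unsafe reply (tid N) -------|  applied, not yet journaled
 *	  |<--- safe reply (tid N) ---------|  now durable on disk
 *
 * Only the unsafe reply carries a result and trace; the safe reply just
 * confirms durability, so the code above only unregisters the request
 * (which also wakes anyone blocked on r_safe_completion, see
 * wait_unsafe_requests() below).
 */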
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(req->r_got_result);
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * also re-send old requests when MDS enters reconnect stage. So that MDS
	 * can process completed request in clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;

encode_again:
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
				    (num_fcntl_locks+num_flock_locks) *
				    sizeof(struct ceph_filelock));
		err = ceph_pagelist_append(pagelist, &rec, reclen);
		if (!err)
			err = ceph_locks_to_pagelist(flocks, pagelist,
						     num_fcntl_locks,
						     num_flock_locks);
		kfree(flocks);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
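/*
 * The per-cap record appended to the pagelist above has this shape
 * (a sketch inferred from the encoding calls; the blob is consumed by
 * the MDS, not decoded again by this client):
 *
 *	u64     ino
 *	string  path        (u32 length + bytes, possibly empty)
 *	rec     v1 or v2    (struct ceph_mds_cap_reconnect[_v1])
 *	locks               (v2 only: fcntl/flock counts + lock entries)
 */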
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
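/*
 * Layout of the reconnect payload assembled above (a sketch; the front
 * of the message is empty, everything rides in the data pagelist):
 *
 *	u32 nr_caps
 *	nr_caps x cap record     (see encode_caps_cb() above)
 *	one struct ceph_mds_snaprealm_reconnect per known snap realm
 *
 * nr_caps is patched in place at the end if caps were released while
 * the list was being built (the "raced with cap release?" block).
 */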
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
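/*
 * Wire layout of the lease message built above (follows directly from
 * the encoding):
 *
 *	struct ceph_mds_lease   header  (action, ino, first/last, seq, ...)
 *	__le32                  dnamelen
 *	char                    name[dnamelen]
 */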
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
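/*
 * Cadence sketch: delayed_work() re-arms itself roughly every 5 seconds
 * via schedule_delayed().  A full cap renewal is only sent once per
 * quarter of the mdsmap session timeout (renew_interval above); the
 * other passes just send keepalives and flush pending cap releases.
 */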
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;
	int err;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		err = -ENOMEM;
		goto err_mdsc;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	mdsc->cap_flush_tree = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;

err_mdsc:
	kfree(mdsc);
	return err;
}
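/*
 * Lifecycle note: ceph_mdsc_init() is paired with ceph_mdsc_destroy()
 * near the bottom of this file; everything initialized here is torn
 * down there (via ceph_mdsc_stop()) once the sessions have been closed.
 */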
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush, want_snap;

	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	spin_unlock(&mdsc->cap_dirty_lock);

	down_read(&mdsc->snap_rwsem);
	want_snap = mdsc->last_snap_seq;
	up_read(&mdsc->snap_rwsem);

	dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
	     want_tid, want_flush, want_snap);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush, want_snap);
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);

	dout("mdsc_destroy %p done\n", mdsc);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}
static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}
static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
= {
3981 .dispatch
= dispatch
,
3982 .get_authorizer
= get_authorizer
,
3983 .verify_authorizer_reply
= verify_authorizer_reply
,
3984 .invalidate_authorizer
= invalidate_authorizer
,
3985 .peer_reset
= peer_reset
,
3986 .alloc_msg
= mds_alloc_msg
,
3987 .sign_message
= mds_sign_message
,
3988 .check_message_signature
= mds_check_message_signature
,
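/*
 * Usage sketch (grounded in the session setup earlier in this file):
 * register_session() hands &mds_con_ops to ceph_con_init() when a
 * session's connection is created, which is how incoming MDS messages
 * are routed to dispatch() above.
 */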