#include "ceph_debug.h"

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "mds_client.h"
#include "mon_client.h"

#include "messenger.h"
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

const static struct ceph_connection_operations mds_con_ops;
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info)
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	*p += info->symlink_len;

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info)
	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri);
		if (unlikely(*p + sizeof(*info->dirfrag) > end))
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		*p += info->dname_len;
		*p += sizeof(*info->dlease);

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti);

	if (unlikely(*p != end))
	pr_err("problem parsing mds trace %d\n", err);
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info)
	if (*p + sizeof(*info->dir_dir) > end)
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);

	/* alloc large array */
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

	ceph_decode_need(p, end, sizeof(u32)*2, bad);
	info->dir_dname_len[i] = ceph_decode_32(p);
	ceph_decode_need(p, end, info->dir_dname_len[i], bad);
	info->dir_dname[i] = *p;
	*p += info->dir_dname_len[i];
	dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
	     info->dir_dname[i]);
	info->dir_dlease[i] = *p;
	*p += sizeof(struct ceph_mds_reply_lease);

	err = parse_reply_info_in(p, end, &info->dir_in[i]);

	pr_err("problem parsing dir contents %d\n", err);
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info)
	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	ceph_decode_32_safe(&p, end, len, bad);
	err = parse_reply_info_trace(&p, p+len, info);

	ceph_decode_32_safe(&p, end, len, bad);
	err = parse_reply_info_dir(&p, p+len, info);

	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;

	pr_err("mds parse_reply err %d\n", err);
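/*
 * Orientation note (added comment, derived from the decode calls above
 * rather than taken from the original source): the reply front is a
 * ceph_mds_reply_head followed by three u32-length-prefixed blobs --
 * the trace, the readdir contents, and the snap blob -- which
 * parse_reply_info_trace() and parse_reply_info_dir() walk in place.
 */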
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
static const char *session_state_name(int s)
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
	dout("mdsc get_session %p 0 -- FAIL", s);

void ceph_put_mds_session(struct ceph_mds_session *s)
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
			s->s_mdsc->client->monc.auth, s->s_authorizer);
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
	if (mds >= mdsc->max_sessions)
	return mdsc->sessions[mds];

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
	struct ceph_mds_session *s;

	s = kzalloc(sizeof(*s), GFP_NOFS);
		return ERR_PTR(-ENOMEM);
	s->s_state = CEPH_MDS_SESSION_NEW;
	mutex_init(&s->s_mutex);

	ceph_con_init(mdsc->client->msgr, &s->s_con);
	s->s_con.private = s;
	s->s_con.ops = &mds_con_ops;
	s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
	s->s_con.peer_name.num = cpu_to_le64(mds);

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	INIT_LIST_HEAD(&s->s_caps);
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		mdsc->max_sessions = newmax;

	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return ERR_PTR(-ENOMEM);
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;

void ceph_mdsc_release_request(struct kref *kref)
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_msg_put(req->r_request);
	ceph_msg_put(req->r_reply);
	destroy_reply_info(&req->r_reply_info);
	ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_target_inode)
		iput(req->r_target_inode);
	if (req->r_old_dentry) {
		ceph_put_cap_refs(
			ceph_inode(req->r_old_dentry->d_parent->d_inode),
			CEPH_CAP_PIN);
		dput(req->r_old_dentry);
	put_request_session(req);
	ceph_unreserve_caps(&req->r_caps_reservation);
/*
 * lookup session, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	req = rb_entry(n, struct ceph_mds_request, r_node);
	if (tid < req->r_tid)
	else if (tid > req->r_tid)
	ceph_mdsc_get_request(req);
static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	req = rb_entry(parent, struct ceph_mds_request, r_node);
	if (new->r_tid < req->r_tid)
	else if (new->r_tid > req->r_tid)

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
	req->r_tid = ++mdsc->last_tid;
	ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	struct ceph_inode_info *ci = ceph_inode(dir);

	spin_lock(&ci->i_unsafe_lock);
	req->r_unsafe_dir = dir;
	list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
	spin_unlock(&ci->i_unsafe_lock);
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);

	ceph_mdsc_put_request(req);
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;

	if (mode == USE_RANDOM_MDS)

		inode = req->r_inode;
	} else if (req->r_dentry) {
		if (req->r_dentry->d_inode) {
			inode = req->r_dentry->d_inode;
			inode = req->r_dentry->d_parent->d_inode;
			hash = req->r_dentry->d_name.hash;

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     hash, mode);
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (mode == USE_ANY_MDS && frag.ndist > 0) {
			/* choose a random replica */
			get_random_bytes(&r, 1);
			dout("choose_mds %p %llx.%llx "
			     "frag %u mds%d (%d/%d)\n",
			     inode, ceph_vinop(inode),

		/* since this file/dir wasn't known to be
		 * replicated, then we want to look for the
		 * authoritative mds. */

		/* choose auth mds */
		dout("choose_mds %p %llx.%llx "
		     "frag %u mds%d (auth)\n",
		     inode, ceph_vinop(inode), frag.frag, mds);

	spin_lock(&inode->i_lock);
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
		spin_unlock(&inode->i_lock);
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&inode->i_lock);

	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
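/*
 * Summary of the selection order implemented above (added comment,
 * derived from the code rather than from the original source): an
 * explicit resend/forward hint wins if that mds is usable; otherwise a
 * hashed name in a directory consults the frag tree (a random replica
 * for USE_ANY_MDS, else the auth mds); otherwise we follow an existing
 * cap (the auth cap for USE_AUTH_MDS); and finally we fall back to a
 * random mds from the mdsmap.
 */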
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
		pr_err("create_session_msg ENOMEM creating msg\n");
		return ERR_PTR(PTR_ERR(msg));
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
	struct ceph_msg *msg;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
	ceph_con_send(&session->s_con, msg);
/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
	spin_unlock(&session->s_cap_lock);
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		ceph_put_cap(old_cap);

		ret = cb(inode, cap, arg);

		spin_lock(&session->s_cap_lock);
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			old_cap = cap;  /* put_cap it w/o locks held */

	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	ceph_put_cap(old_cap);
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
	struct ceph_inode_info *ci = ceph_inode(inode);
	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	ceph_remove_cap(cap);

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	cleanup_cap_releases(session);
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up(&ci->i_cap_wq);
	spin_lock(&inode->i_lock);
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;
	spin_unlock(&inode->i_lock);

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
	struct ceph_msg *msg;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	ceph_con_send(&session->s_con, msg);
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (time_before(jiffies, session->s_cap_ttl)) {
		pr_info("mds%d caps renewed\n", session->s_mds);
		pr_info("mds%d caps still stale\n", session->s_mds);
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	wake_up_session_caps(session, 0);
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	ceph_con_send(&session->s_con, msg);

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
/*
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	if (session->s_trim_caps <= 0)

	spin_lock(&inode->i_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;   /* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	/* we aren't the only cap.. just remove us */
	__ceph_remove_cap(cap);

	/* try to drop referring dentries */
	spin_unlock(&inode->i_lock);
	d_prune_aliases(inode);
	dout("trim_caps_cb %p cap %p pruned, count now %d\n",
	     inode, cap, atomic_read(&inode->i_count));

	spin_unlock(&inode->i_lock);
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
static int add_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session,
			    int extra)
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;

	extra = mdsc->client->mount_args->cap_release_safety;

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		head = msg->front.iov_base;
		extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);

	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   0, 0, NULL);
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		head = msg->front.iov_base;
		dout(" queueing non-full %p (%d)\n", msg,
		     le32_to_cpu(head->num));
		list_move_tail(&msg->list_head,
			       &session->s_cap_releases_done);
		session->s_num_cap_releases -=
			CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);

	spin_unlock(&session->s_cap_lock);
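/*
 * Added note (derived from the loop above): enough empty CAPRELEASE
 * messages are kept preallocated to cover s_nr_caps plus a safety
 * margin, so individual releases can later be queued without
 * allocating under s_cap_lock; a partially filled message is moved to
 * s_cap_releases_done for send_cap_releases() to push out.
 */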
/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&inode->i_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
			spin_unlock(&inode->i_lock);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		mutex_lock(&mdsc->mutex);
	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
/*
 * called under s_mutex
 */
static void send_cap_releases(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session)
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);
	if (list_empty(&session->s_cap_releases_done))
	msg = list_first_entry(&session->s_cap_releases_done,
			       struct ceph_msg, list_head);
	list_del_init(&msg->list_head);
	spin_unlock(&session->s_cap_lock);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
	ceph_con_send(&session->s_con, msg);
	spin_unlock(&session->s_cap_lock);
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

		return ERR_PTR(-ENOMEM);

	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	req->r_direct_mode = mode;
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);

static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
	struct ceph_mds_request *req = __get_oldest_req(mdsc);
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
	struct dentry *temp;

		return ERR_PTR(-EINVAL);

	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
			pr_err("build_path_dentry corrupt dentry %p\n", dentry);
			return ERR_PTR(-EINVAL);
	len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
		return ERR_PTR(-ENOMEM);
	path[pos] = 0;	/* trailing null */
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode = temp->d_inode;

		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path_dentry path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			pos -= temp->d_name.len;
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
			dout("build_path_dentry path+%d: %p '%.*s'\n",
			     pos, temp, temp->d_name.len, path + pos);
		temp = temp->d_parent;
			pr_err("build_path_dentry corrupt dentry\n");
			return ERR_PTR(-EINVAL);

		pr_err("build_path_dentry did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */

	*base = ceph_ino(temp->d_inode);
	dout("build_path_dentry on %p %d built %llx '%.*s'\n",
	     dentry, atomic_read(&dentry->d_count), *base, len, path);
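/*
 * Added illustration (based on the encoding described above, not taken
 * from the original source): for the dentry foo/.snap/bar the snapdir
 * contributes only a slash, so the built path is "foo//bar".  The
 * function measures the length in a first pass, then fills the buffer
 * backwards from the trailing NUL, retrying if a concurrent rename
 * changes the name lengths underneath it.
 */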
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
		return PTR_ERR(path);
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
	struct dentry *dentry;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
		return PTR_ERR(path);
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
		*pathlen = strlen(rpath);
		dout(" path %.*s\n", *pathlen, rpath);
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);

	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(current_fsuid());
	head->caller_gid = cpu_to_le32(current_fsgid());
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_old_dentry->d_inode,
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
	head->num_releases = cpu_to_le16(releases);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	msg->pages = req->r_pages;
	msg->nr_pages = req->r_num_pages;
	msg->hdr.data_len = cpu_to_le32(req->r_data_len);
	msg->hdr.data_off = cpu_to_le16(0);

	kfree((char *)path2);
	kfree((char *)path1);
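/*
 * Added note (derived from the encoding above): the request front is a
 * ceph_mds_request_head followed by two encoded filepaths (ino + path
 * for path1 and path2) and then the inode/dentry cap releases;
 * front.iov_len is trimmed to what was actually encoded, and any data
 * pages ride along via msg->pages.
 */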
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
	if (req->r_callback)
		req->r_callback(mdsc, req);
	complete(&req->r_completion);
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds)
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;

	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	msg = create_request_message(mdsc, req, mds);
		req->r_reply = ERR_PTR(PTR_ERR(msg));
		complete_request(mdsc, req);
		return -PTR_ERR(msg);
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);

	if (req->r_target_inode && req->r_got_unsafe)
		rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
	struct ceph_mds_session *session = NULL;

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");

	mds = __choose_mds(mdsc, req);
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
	dout("do_request mds%d session %p state %s\n", mds, session,
	     session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);

	req->r_session = get_session(session);
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds);
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);

	ceph_put_mds_session(session);

	req->r_reply = ERR_PTR(err);
	complete_request(mdsc, req);
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
	struct ceph_mds_request *req, *nreq;

	list_for_each_entry_safe(req, nreq, head, r_wait) {
		list_del_init(&req->r_wait);
		__do_request(mdsc, req);
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.  If @all is set,
 * wake up if their requests have been forwarded to @mds, too.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
	struct ceph_mds_request *req;

	dout("kick_requests mds%d\n", mds);
	for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		if (req->r_got_unsafe)
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			put_request_session(req);
			__do_request(mdsc, req);
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry)
		ceph_get_cap_refs(
			ceph_inode(req->r_old_dentry->d_parent->d_inode),
			CEPH_CAP_PIN);

	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (!req->r_reply) {
		mutex_unlock(&mdsc->mutex);
		if (req->r_timeout) {
			err = (long)wait_for_completion_interruptible_timeout(
				&req->r_completion, req->r_timeout);
				req->r_reply = ERR_PTR(-EIO);
				req->r_reply = ERR_PTR(err);
			err = wait_for_completion_interruptible(
				&req->r_completion);
				req->r_reply = ERR_PTR(err);
		mutex_lock(&mdsc->mutex);

	if (IS_ERR(req->r_reply)) {
		err = PTR_ERR(req->r_reply);
		req->r_reply = NULL;

		if (err == -ERESTARTSYS) {
			req->r_aborted = true;

			if (req->r_locked_dir &&
			    (req->r_op & CEPH_MDS_OP_WRITE)) {
				struct ceph_inode_info *ci =
					ceph_inode(req->r_locked_dir);

				dout("aborted, clearing I_COMPLETE on %p\n",
				spin_lock(&req->r_locked_dir->i_lock);
				ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
				ci->i_release_count++;
				spin_unlock(&req->r_locked_dir->i_lock);

			/* clean up this request */
			__unregister_request(mdsc, req);
			if (!list_empty(&req->r_unsafe_item))
				list_del_init(&req->r_unsafe_item);
			complete(&req->r_safe_completion);
	} else if (req->r_err) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	mutex_unlock(&mdsc->mutex);

	dout("do_request %p done, result %d\n", req, err);
/*
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);

	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warning("got a dup %s reply on %llu from mds%d\n",
			   head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);

	result = le32_to_cpu(head->result);

	/*
	 * Tolerate 2 consecutive ESTALEs from the same mds.
	 * FIXME: we should be looking at the cap migrate_seq.
	 */
	if (result == -ESTALE) {
		req->r_direct_mode = USE_AUTH_MDS;
		if (req->r_num_stale <= 2) {
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
		req->r_num_stale = 0;

		req->r_got_safe = true;
		__unregister_request(mdsc, req);
		complete(&req->r_safe_completion);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);

	BUG_ON(req->r_reply);

		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
		pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);

	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
			       rinfo->snapblob + rinfo->snapblob_len,
			       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
		downgrade_write(&mdsc->snap_rwsem);
		down_read(&mdsc->snap_rwsem);

	/* insert trace into our cache */
	err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
		if (result == 0 && rinfo->dir_nr)
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(&req->r_caps_reservation);

	up_read(&mdsc->snap_rwsem);

	add_cap_releases(mdsc, req->r_session, -1);
	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
	ceph_mdsc_put_request(req);
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
		dout("forward %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */

	if (fwd_seq <= req->r_num_fwd) {
		dout("forward %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
		/* resend. forward race not possible; mds would drop */
		dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	ceph_mdsc_put_request(req);

	mutex_unlock(&mdsc->mutex);

	pr_err("mdsc_handle_forward decode error err=%d\n", err);
/*
 * handle an mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
	struct ceph_mds_client *mdsc = session->s_mdsc;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;

	if (msg->front.iov_len != sizeof(*h))
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);

	case CEPH_SESSION_OPEN:
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
			__close_session(mdsc, session);

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);

	case CEPH_SESSION_CLOSE:
		remove_session_caps(session);
		wake = 1; /* for good measure */
		complete(&mdsc->session_close_waiters);
		kick_requests(mdsc, mds, 0);      /* cur only */

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_cap_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = 0;
		spin_unlock(&session->s_cap_lock);
		send_renew_caps(mdsc, session);

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));

		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);

	mutex_unlock(&session->s_mutex);
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		mutex_unlock(&mdsc->mutex);

	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
	struct ceph_mds_request *req, *nreq;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds);
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
	mutex_unlock(&mdsc->mutex);
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
	struct ceph_mds_cap_reconnect rec;
	struct ceph_inode_info *ci;
	struct ceph_pagelist *pagelist = arg;
	struct dentry *dentry;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));

	dentry = d_find_alias(inode);
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
			err = PTR_ERR(path);
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);

	spin_lock(&inode->i_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	rec.cap_id = cpu_to_le64(cap->cap_id);
	rec.pathbase = cpu_to_le64(pathbase);
	rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
	rec.issued = cpu_to_le32(cap->issued);
	rec.size = cpu_to_le64(inode->i_size);
	ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
	ceph_encode_timespec(&rec.atime, &inode->i_atime);
	rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
	spin_unlock(&inode->i_lock);

	err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
	struct ceph_mds_session *session = NULL;
	struct ceph_msg *reply;
	struct ceph_pagelist *pagelist;

	pr_info("reconnect to recovering mds%d\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);

	session = __ceph_lookup_mds_session(mdsc, mds);
	mutex_unlock(&mdsc->mutex);    /* drop lock for duration */

		mutex_lock(&session->s_mutex);

		session->s_state = CEPH_MDS_SESSION_RECONNECTING;
		ceph_con_open(&session->s_con,
			      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

		/* replay unsafe requests */
		replay_unsafe_requests(mdsc, session);
		dout("no session for mds%d, will send short reconnect\n",
		     mds);

	down_read(&mdsc->snap_rwsem);

	dout("session %p state %s\n", session,
	     session_state_name(session->s_state));

	/* traverse this session's caps */
	err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
	err = iterate_session_caps(session, encode_caps_cb, pagelist);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));

	reply->pagelist = pagelist;
	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	reply->nr_pages = calc_pages_for(0, pagelist->length);
	ceph_con_send(&session->s_con, reply);

	session->s_state = CEPH_MDS_SESSION_OPEN;
	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	ceph_put_mds_session(session);

	up_read(&mdsc->snap_rwsem);
	mutex_lock(&mdsc->mutex);

	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
	ceph_put_mds_session(session);
	ceph_pagelist_release(pagelist);
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	mutex_lock(&mdsc->mutex);
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s -> %s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mds_state_name(newstate),
		     session_state_name(s->s_state));

		if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;

			/* kick any requests waiting on the recovering mds */
			kick_requests(mdsc, i, 1);
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */

		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT)
			send_mds_reconnect(mdsc, i);

		/*
		 * kick requests on any mds that has gone active.
		 *
		 * kick requests on cur or forwarder: we may have sent
		 * the request to mds1, mds1 told us it forwarded it
		 * to mds2, but then we learn mds1 failed and can't be
		 * sure it successfully forwarded our request before
		 * it died.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			pr_info("mds%d reconnect completed\n", s->s_mds);
			kick_requests(mdsc, i, 1);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	struct ceph_vino vino;
	int mask;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	mask = le16_to_cpu(h->mask);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease '%s', mask %d, ino %llx %p\n",
	     ceph_lease_op_name(h->action), mask, vino.ino, inode);
	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}
	ci = ceph_inode(inode);

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di && di->lease_session == session) {
			h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di && di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = le32_to_cpu(h->seq);
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
	if (IS_ERR(msg))
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->mask = cpu_to_le16(CEPH_LOCK_DN);
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
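/*
 * Encoding note (added for clarity): the layout written here mirrors
 * what handle_lease() parses -- a struct ceph_mds_lease header, a
 * little-endian 32-bit name length, then the dentry name bytes.
 */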
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry, int mask)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);
	BUG_ON(mask != CEPH_LOCK_DN);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry, mask);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p mask %d to mds%d\n",
	     inode, dentry, mask, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
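/*
 * Note: drop_leases() does no per-dentry work here; taking and
 * immediately releasing each session's s_mutex is presumably enough to
 * let any lease handling already in flight on that session finish
 * before umount proceeds.
 */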
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);

	schedule_delayed_work(&mdsc->delayed_work, hz);
}
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		add_cap_releases(mdsc, s, -1);
		send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
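/*
 * Timing sketch (derived from the code above, exact re-arm delay
 * assumed): the work item re-arms itself every few seconds via
 * schedule_delayed(); caps are actually renewed only once jiffies has
 * advanced past last_renew_caps by roughly a quarter of the MDS session
 * timeout (m_session_timeout >> 2 seconds), otherwise a plain
 * keepalive is sent on the session connection.
 */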
int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
{
	mdsc->client = client;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL)
		return -ENOMEM;
	init_completion(&mdsc->safe_umount_waiters);
	init_completion(&mdsc->session_close_waiters);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);
	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_client *client = mdsc->client;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    client->mount_args->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	int n;
	struct ceph_client *client = mdsc->client;
	unsigned long started, timeout = client->mount_args->mount_timeout * HZ;

	dout("close_sessions\n");

	mutex_lock(&mdsc->mutex);

	/* close sessions */
	started = jiffies;
	while (time_before(jiffies, started + timeout)) {
		dout("closing sessions\n");
		n = 0;
		for (i = 0; i < mdsc->max_sessions; i++) {
			session = __ceph_lookup_mds_session(mdsc, i);
			if (!session)
				continue;
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			__close_session(mdsc, session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
			n++;
		}
		if (n == 0)
			break;

		if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
			break;

		dout("waiting for sessions to close\n");
		mutex_unlock(&mdsc->mutex);
		wait_for_completion_timeout(&mdsc->session_close_waiters,
					    timeout);
		mutex_lock(&mdsc->mutex);
	}

	/* tear down remaining sessions */
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}

	WARN_ON(!list_empty(&mdsc->cap_delay_list));

	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
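/*
 * Flow note (added for clarity): a map whose epoch is not newer than
 * the one we already hold is ignored; otherwise the new map is swapped
 * in under mdsc->mutex, check_new_map() compares per-MDS states and
 * addresses against the old map, and any requests parked on
 * waiting_for_map are woken to be resubmitted.
 */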
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	ceph_put_mds_session(s);
	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n",
	       s->s_mds);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
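/*
 * Dispatch note (added for clarity): __verify_registered_session()
 * guards against messages racing with session teardown, and every path
 * out of dispatch() drops the message reference via ceph_msg_put(),
 * including the unknown-type case, which is only logged.
 */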
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;
	int ret = 0;

	if (force_new && s->s_authorizer) {
		ac->ops->destroy_authorizer(ac, s->s_authorizer);
		s->s_authorizer = NULL;
	}
	if (s->s_authorizer == NULL) {
		if (ac->ops->create_authorizer) {
			ret = ac->ops->create_authorizer(
				ac, CEPH_ENTITY_TYPE_MDS,
				&s->s_authorizer,
				&s->s_authorizer_buf,
				&s->s_authorizer_buf_len,
				&s->s_authorizer_reply_buf,
				&s->s_authorizer_reply_buf_len);
			if (ret)
				return ret;
		}
	}

	*proto = ac->protocol;
	*buf = s->s_authorizer_buf;
	*len = s->s_authorizer_buf_len;
	*reply_buf = s->s_authorizer_reply_buf;
	*reply_len = s->s_authorizer_reply_buf_len;
	return 0;
}
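/*
 * Authorizer note (added for clarity): the authorizer is cached on the
 * session and handed back on every (re)connect; it is only rebuilt when
 * the messenger asks for a fresh one (force_new) or none has been
 * created yet.
 */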
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;

	return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;

	if (ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->client->monc);
}

const static struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
};