1 #include "ceph_debug.h"
3 #include <linux/types.h>
4 #include <linux/slab.h>
5 #include <linux/random.h>
6 #include <linux/sched.h>
8 #include "mon_client.h"
/*
 * Interact with the Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);
/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}
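/*
 * For reference, the monmap blob decoded above is laid out as (all
 * multi-byte fields little-endian, as handled by the ceph_decode_*
 * helpers):
 *
 *	u32  len                     total payload length
 *	u16  version
 *	     fsid
 *	u32  epoch
 *	u32  num_mon
 *	     struct ceph_entity_inst mon_inst[num_mon]
 *
 * This is a description of what the decoder consumes, not an
 * authoritative wire-format specification.
 */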
/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}
/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_get(monc->m_auth);  /* keep our ref */
	ceph_con_send(monc->con, monc->m_auth);
}
/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	if (monc->con) {
		dout("__close_session closing mon%d\n", monc->cur_mon);
		ceph_con_revoke(monc->con, monc->m_auth);
		ceph_con_close(monc->con);
	}
	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}
/*
 * Open a session with a (new) monitor.
 */
static int __open_session(struct ceph_mon_client *monc)
{
	u8 r;
	int ret;

	if (monc->cur_mon < 0) {
		get_random_bytes(&r, 1);
		monc->cur_mon = r % monc->monmap->num_mon;
		dout("open_session num=%d r=%d -> mon%d\n",
		     monc->monmap->num_mon, r, monc->cur_mon);
		monc->sub_sent = 0;
		monc->sub_renew_after = jiffies;  /* i.e., expired */
		monc->want_next_osdmap = !!monc->want_next_osdmap;

		dout("open_session mon%d opening\n", monc->cur_mon);
		monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
		monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
		ceph_con_open(monc->con,
			      &monc->monmap->mon_inst[monc->cur_mon].addr);

		/* initiate authentication handshake */
		ret = ceph_auth_build_hello(monc->auth,
					    monc->m_auth->front.iov_base,
					    monc->m_auth->front_max);
		__send_prepared_auth_request(monc, ret);
	} else {
		dout("open_session mon%d already open\n", monc->cur_mon);
	}
	return 0;
}
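/*
 * Note on the flow above: when no monitor is currently selected
 * (cur_mon < 0), we pick one at random, mark the subscription as
 * expired so it is resent once the session is up, and immediately
 * queue an authentication "hello" so the handshake begins as soon as
 * the TCP connection is established.
 */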
static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}
/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned delay;

	if (monc->cur_mon < 0 || __sub_expired(monc))
		delay = 10 * HZ;
	else
		delay = 20 * HZ;
	dout("__schedule_delayed after %u\n", delay);
	schedule_delayed_work(&monc->delayed_work, delay);
}
/*
 * Send subscribe request for mdsmap and/or osdmap.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
	     (unsigned)monc->sub_sent, __sub_expired(monc),
	     monc->want_next_osdmap);
	if ((__sub_expired(monc) && !monc->sub_sent) ||
	    monc->want_next_osdmap == 1) {
		struct ceph_msg *msg;
		struct ceph_mon_subscribe_item *i;
		void *p, *end;

		msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS);
		if (!msg)
			return;

		p = msg->front.iov_base;
		end = p + msg->front.iov_len;

		dout("__send_subscribe to 'mdsmap' %u+\n",
		     (unsigned)monc->have_mdsmap);
		if (monc->want_next_osdmap) {
			dout("__send_subscribe to 'osdmap' %u\n",
			     (unsigned)monc->have_osdmap);
			ceph_encode_32(&p, 3);
			ceph_encode_string(&p, end, "osdmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_osdmap);
			i->onetime = 1;
			p += sizeof(*i);
			monc->want_next_osdmap = 2;  /* requested */
		} else {
			ceph_encode_32(&p, 2);
		}
		ceph_encode_string(&p, end, "mdsmap", 6);
		i = p;
		i->have = cpu_to_le64(monc->have_mdsmap);
		i->onetime = 0;
		p += sizeof(*i);
		ceph_encode_string(&p, end, "monmap", 6);
		i = p;
		i->have = 0;
		i->onetime = 0;
		p += sizeof(*i);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		ceph_con_send(monc->con, msg);

		monc->sub_sent = jiffies | 1;  /* never 0 */
	}
}
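/*
 * The subscribe payload built above is simply a 32-bit item count
 * followed by (name string, struct ceph_mon_subscribe_item) pairs:
 * "osdmap" (marked one-shot, and only included when an update was
 * requested), "mdsmap", and "monmap".  The 96 bytes allocated for the
 * message front are presumably sized as an upper bound on that
 * encoding.
 */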
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;

bad:
	pr_err("got corrupt subscribe-ack msg\n");
}
/*
 * Keep track of which maps we have.
 */
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_mdsmap = got;
	mutex_unlock(&monc->mutex);
	return 0;
}

int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}
/*
 * Register interest in the next osdmap.
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("request_next_osdmap have %u\n", monc->have_osdmap);
	mutex_lock(&monc->mutex);
	if (!monc->want_next_osdmap)
		monc->want_next_osdmap = 1;
	if (monc->want_next_osdmap < 2)
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	if (!monc->con) {
		monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
		if (!monc->con)
			return -ENOMEM;
		ceph_con_init(monc->client->msgr, monc->con);
		monc->con->private = monc;
		monc->con->ops = &mon_con_ops;
	}

	mutex_lock(&monc->mutex);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
/*
 * The monitor responds with a mount ack to indicate mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

out:
	mutex_unlock(&monc->mutex);
	wake_up(&client->auth_wq);
}
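/*
 * Generic requests (currently just statfs) are tracked in an rbtree
 * keyed by transaction id (tid).  The helpers below look a request up
 * by the tid echoed in the monitor's reply, insert new requests, and
 * manage request lifetime with a kref shared between the sender and
 * the reply path.
 */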
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_generic_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_generic_request(struct ceph_mon_client *monc,
				     struct ceph_mon_generic_request *new)
{
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
}
static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);
	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt generic reply, no tid\n");
}
/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS);
	if (!req->reply)
		goto out;

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	/* register request */
	mutex_lock(&monc->mutex);
	req->tid = ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	mutex_unlock(&monc->mutex);

	/* send request and wait */
	ceph_con_send(monc->con, ceph_msg_get(req->request));
	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;
	mutex_unlock(&monc->mutex);

out:
	kref_put(&req->kref, release_generic_request);
	return err;
}
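/*
 * Illustrative only (not part of this file): a minimal sketch of how a
 * VFS statfs handler might use ceph_monc_do_statfs().  The kstatfs
 * field mapping and the 4 KB block-size convention shown here are
 * assumptions for the example, not dictated by this code.
 *
 *	static int example_statfs(struct ceph_client *client,
 *				  struct kstatfs *buf)
 *	{
 *		struct ceph_statfs st;
 *		int err;
 *
 *		err = ceph_monc_do_statfs(&client->monc, &st);
 *		if (err < 0)
 *			return err;
 *
 *		// counters arrive little-endian, in kilobytes
 *		buf->f_bsize = 4096;
 *		buf->f_blocks = le64_to_cpu(st.kb) >> 2;
 *		buf->f_bfree = le64_to_cpu(st.kb_avail) >> 2;
 *		buf->f_bavail = le64_to_cpu(st.kb_avail) >> 2;
 *		buf->f_files = le64_to_cpu(st.num_objects);
 *		return 0;
 *	}
 */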
/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_con_send(monc->con, ceph_msg_get(req->request));
	}
}
/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc);  /* continue hunting */
	} else {
		ceph_con_keepalive(monc->con);

		__validate_auth(monc);

		if (monc->auth->ops->is_authenticated(monc->auth))
			__send_subscribe(monc);
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}
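/*
 * In other words, this periodic tick drives two modes: while hunting
 * it tears down the faulted session and calls __open_session() again,
 * and once a session is established it sends the TCP keepalive,
 * revalidates authentication, and renews the map subscription when the
 * lease is about to expire.
 */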
/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_mount_args *args = monc->client->mount_args;
	struct ceph_entity_addr *mon_addr = args->mon_addr;
	int num_mon = args->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	monc->have_fsid = false;

	/* release addr memory */
	kfree(args->mon_addr);
	args->mon_addr = NULL;
	args->num_mon = 0;
	return 0;
}
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	monc->con = NULL;

	/* authentication */
	monc->auth = ceph_auth_init(cl->mount_args->name,
				    cl->mount_args->secret);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS);
	if (!monc->m_subscribe_ack)
		goto out_monmap;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS);
	if (!monc->m_auth_reply)
		goto out_subscribe_ack;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_sent = 0;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;
	monc->last_tid = 0;

	monc->have_mdsmap = 0;
	monc->have_osdmap = 0;
	monc->want_next_osdmap = 1;
	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
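/*
 * A note on the message setup above: the subscribe-ack, auth, and
 * auth-reply messages are allocated once here and then reused (via
 * ceph_msg_get/ceph_msg_put) for the lifetime of the client,
 * presumably so that replies from the monitor can be received without
 * allocating memory in the receive path.
 */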
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	if (monc->con) {
		monc->con->private = NULL;
		monc->con->ops->put(monc->con);
		monc->con = NULL;
	}
	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;

	mutex_lock(&monc->mutex);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
	if (ret < 0) {
		monc->client->auth_err = ret;
		wake_up(&monc->client->auth_wq);
	} else if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
	} else if (monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);
	}
	mutex_unlock(&monc->mutex);
}
static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_max);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}
int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(&monc->client->mdsc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}
/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS);
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	}
	return m;
}
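/*
 * To summarize the allocation strategy above: subscribe-ack and
 * auth-reply messages reuse the buffers preallocated in
 * ceph_monc_init(), statfs replies are matched by tid against the
 * pending generic request (and skipped if none is found), and the
 * various map messages get a fresh buffer sized from the header.
 */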
/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!monc->con)
		goto out;

	if (monc->con && !monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}
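/*
 * Hunting, as used above and in delayed_work(), simply means "we lost
 * the monitor session and are trying to get a new one": the first
 * fault starts hunting and reopens a session immediately, while
 * subsequent faults back off and leave the retry to the delayed work
 * timer.  handle_subscribe_ack() clears the flag once a session is
 * confirmed.
 */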
static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};