#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <net/tcp.h>

#include "super.h"
#include "messenger.h"
#include "decode.h"
#include "pagelist.h"
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

const char *ceph_name_type_str(int t)
{
	switch (t) {
	case CEPH_ENTITY_TYPE_MON: return "mon";
	case CEPH_ENTITY_TYPE_MDS: return "mds";
	case CEPH_ENTITY_TYPE_OSD: return "osd";
	case CEPH_ENTITY_TYPE_CLIENT: return "client";
	case CEPH_ENTITY_TYPE_ADMIN: return "admin";
	default: return "???";
	}
}
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
static char addr_str[MAX_ADDR_STR][40];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;

const char *pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	unsigned char *quad = (void *)&in4->sin_addr.s_addr;
	struct sockaddr_in6 *in6 = (void *)ss;

	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		sprintf(s, "%u.%u.%u.%u:%u",
			(unsigned int)quad[0],
			(unsigned int)quad[1],
			(unsigned int)quad[2],
			(unsigned int)quad[3],
			(unsigned int)ntohs(in4->sin_port));
		break;

	case AF_INET6:
		sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
			in6->sin6_addr.s6_addr16[0],
			in6->sin6_addr.s6_addr16[1],
			in6->sin6_addr.s6_addr16[2],
			in6->sin6_addr.s6_addr16[3],
			in6->sin6_addr.s6_addr16[4],
			in6->sin6_addr.s6_addr16[5],
			in6->sin6_addr.s6_addr16[6],
			in6->sin6_addr.s6_addr16[7],
			(unsigned int)ntohs(in6->sin6_port));
		break;

	default:
		sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
	}

	return s;
}
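/*
 * Illustrative sketch (not part of this file): the rotating
 * static-buffer trick pr_addr() uses above.  A small ring of static
 * buffers lets up to MAX_ADDR_STR results stay valid for concurrent
 * callers without caller-side allocation.  The names below
 * (render_port, NBUF) are made up; a pthread mutex stands in for the
 * kernel spinlock.  Compiled out.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

#define NBUF 20

static char bufs[NBUF][40];
static int next_buf;
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static const char *render_port(unsigned int port)
{
	char *s;

	/* grab the next slot in the ring; old strings get recycled */
	pthread_mutex_lock(&buf_lock);
	s = bufs[next_buf++];
	if (next_buf == NBUF)
		next_buf = 0;
	pthread_mutex_unlock(&buf_lock);

	snprintf(s, sizeof(bufs[0]), "port:%u", port);
	return s;
}
#endif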
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
	ceph_msgr_wq = create_workqueue("ceph-msgr");
	if (IS_ERR(ceph_msgr_wq)) {
		int ret = PTR_ERR(ceph_msgr_wq);
		pr_err("msgr_init failed to create workqueue: %d\n", ret);
		ceph_msgr_wq = NULL;
		return ret;
	}
	return 0;
}

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;
	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}
	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}
/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	con->sock = sock;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->sock = NULL;
		con->error_msg = "connect error";
	}

	if (ret < 0)
		return ERR_PTR(ret);
	return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
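/*
 * Illustrative userspace analogue (not part of this file) of the
 * MSG_MORE hint ceph_tcp_sendmsg() passes above: when the caller knows
 * another chunk follows (e.g. page data after the kvec'd header),
 * MSG_MORE lets TCP coalesce both into one segment.  Assumes `fd` is a
 * connected stream socket; the function name is made up.  Compiled out.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int send_two_parts(int fd, const void *hdr, size_t hlen,
			  const void *body, size_t blen)
{
	struct iovec iov = { (void *)hdr, hlen };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	/* more data follows shortly: let TCP batch it */
	if (sendmsg(fd, &msg, MSG_NOSIGNAL | MSG_MORE) < 0)
		return -1;

	iov.iov_base = (void *)body;
	iov.iov_len = blen;
	/* last piece: no MSG_MORE, so the segment can go out now */
	return sendmsg(fd, &msg, MSG_NOSIGNAL) < 0 ? -1 : 0;
}
#endif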
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	queue_con(con);
}

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}
/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}

/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		list_del_init(&m->list_head);
	} else {
		/* put message on sent list */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	}

	m->hdr.seq = cpu_to_le64(++con->out_seq);

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		con->out_msg_pos.page_pos =
			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}
static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = CEPH_FEATURE_SUPPORTED;
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
				break;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
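/*
 * Illustrative sketch (not part of this file): the cursor-advance
 * logic above, restated over userspace writev().  After a short write,
 * whole iovec entries are consumed first, then the partially-sent
 * entry's base/len are bumped so the next call resumes mid-entry.
 * The function name is made up.  Compiled out.
 */
#if 0
#include <sys/uio.h>

static int write_all(int fd, struct iovec *iov, int iovcnt)
{
	while (iovcnt > 0) {
		ssize_t n = writev(fd, iov, iovcnt);

		if (n < 0)
			return -1;
		/* consume whole entries first... */
		while (iovcnt > 0 && (size_t)n >= iov->iov_len) {
			n -= iov->iov_len;
			iov++;
			iovcnt--;
		}
		/* ...then bump the partially-sent entry */
		if (iovcnt > 0) {
			iov->iov_base = (char *)iov->iov_base + n;
			iov->iov_len -= n;
		}
	}
	return 0;
}
#endif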
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;  /* the nocrc option disables data crcs */
	int ret;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
		struct page *page = NULL;
		void *kaddr = NULL;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */
		if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
			  (int)(data_len - con->out_msg_pos.data_pos));
		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}

		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos, len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc && (msg->pages || msg->pagelist))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}
/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
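/*
 * Illustrative sketch (not part of this file): read_partial() resumes
 * by comparing the running *to cursor against in_base_pos, so each
 * handshake field re-enters at the right offset after a short read.
 * The same bookkeeping over userspace read(2); names and the
 * simplified error handling are made up.  Compiled out.
 */
#if 0
#include <unistd.h>

/* returns 1 when `size` bytes of `obj` are complete, 0 to retry, -1 on error */
static int read_partial_ex(int fd, int *pos, int *to, int size, void *obj)
{
	*to += size;
	while (*pos < *to) {
		int left = *to - *pos;
		int have = size - left;       /* bytes of obj already filled */
		ssize_t n = read(fd, (char *)obj + have, left);

		if (n < 0)
			return 0;             /* would-block: caller retries */
		if (n == 0)
			return -1;            /* EOF */
		*pos += n;
	}
	return 1;
}
#endif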
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;
out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		struct sockaddr_in *in4 = (void *)ss;
		struct sockaddr_in6 *in6 = (void *)ss;
		int port;

		memset(ss, 0, sizeof(*ss));
		if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
			     ',', &ipend)) {
			ss->ss_family = AF_INET;
		} else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
				    ',', &ipend)) {
			ss->ss_family = AF_INET6;
		} else {
			goto bad;
		}
		p = ipend;

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%s'\n", c);
	return -EINVAL;
}
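/*
 * Hedged usage sketch (not from this file) for ceph_parse_ips():
 * parse a comma-separated monitor list such as "1.2.3.4:6789,10.0.0.1"
 * (the second entry falls back to CEPH_MON_PORT).  The array size and
 * function name are illustrative only.  Compiled out.
 */
#if 0
static int example_parse_mons(void)
{
	const char *spec = "1.2.3.4:6789,10.0.0.1";
	struct ceph_entity_addr mon_addr[4];
	int num_mons = 0;
	int err;

	err = ceph_parse_ips(spec, spec + strlen(spec),
			     mon_addr, 4, &num_mons);
	if (err < 0)
		return err;            /* -EINVAL on malformed input */
	dout("parsed %d monitor addrs\n", num_mons);
	return 0;
}
#endif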
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
			   pr_addr(&con->peer_addr.in_addr),
			   le64_to_cpu(con->peer_addr.nonce),
			   pr_addr(&con->actual_peer_addr.in_addr),
			   le64_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
	u64 req_feat = CEPH_FEATURE_REQUIRED;
	u64 server_feat = le64_to_cpu(con->in_reply.features);

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
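/*
 * Illustrative model (not part of this file) of the ack contract
 * process_ack() implements: every sent message stays retained until
 * the peer acks a seq >= its own, at which point it may be dropped.
 * An array stands in for the kernel list; all names are made up.
 * Compiled out.
 */
#if 0
struct sent_msg {
	unsigned long long seq;
	int valid;
};

static void drop_acked(struct sent_msg *sent, int n, unsigned long long ack)
{
	int i;

	for (i = 0; i < n; i++)
		if (sent[i].valid && sent[i].seq <= ack)
			sent[i].valid = 0;   /* safe to free/reuse now */
}
#endif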
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section, unsigned int sec_len,
					u32 *crc)
{
	int left;
	int ret;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
		if (section->iov_len == sec_len)
			*crc = crc32c(0, section->iov_base,
				      section->iov_len);
	}

	return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip);
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	void *p;
	int ret;
	int to, left;
	unsigned front_len, middle_len, data_len, data_off;
	int datacrc = !con->msgr->nocrc;  /* nocrc option disables data crcs */
	int skip;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, con->in_hdr.crc);
				return -EBADMSG;
			}
		}
	}
	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_off = le16_to_cpu(con->in_hdr.data_off);

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg returned NULL, skipping message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			return 0;
		}
		if (IS_ERR(con->in_msg)) {
			ret = PTR_ERR(con->in_msg);
			con->in_msg = NULL;
			con->error_msg =
				"error allocating memory for incoming message";
			return ret;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec, middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		left = min((int)(data_len - con->in_msg_pos.data_pos),
			   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
		BUG_ON(m->pages == NULL);
		p = kmap(m->pages[con->in_msg_pos.page]);
		ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
				       left);
		if (ret > 0 && datacrc)
			con->in_data_crc =
				crc32c(con->in_data_crc,
				       p + con->in_msg_pos.page_pos, ret);
		kunmap(m->pages[con->in_msg_pos.page]);
		if (ret <= 0)
			return ret;
		con->in_msg_pos.data_pos += ret;
		con->in_msg_pos.page_pos += ret;
		if (con->in_msg_pos.page_pos == PAGE_SIZE) {
			con->in_msg_pos.page_pos = 0;
			con->in_msg_pos.page++;
		}
	}

	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
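/*
 * Reference sketch (not part of this file): the header check above
 * recomputes crc32c over every byte of ceph_msg_header except the
 * trailing crc field itself.  A self-contained, slow, bitwise
 * Castagnoli CRC for illustration; the kernel's crc32c() is
 * table/instruction based, and conventions around folding the initial
 * and final inversions into the call differ between libraries, so
 * treat this as a sketch rather than a drop-in.  Compiled out.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int k;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			/* reflected CRC-32C polynomial 0x82f63b78 */
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

/* e.g.: crc32c_sw(0, &hdr, sizeof(hdr) - sizeof(hdr.crc)) */
#endif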
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src.name;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src.name),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

	mutex_lock(&con->mutex);
more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		/*
		 * if we were STANDBY and are reconnecting _this_
		 * connection, bump connect_seq now.  Always bump
		 * global_seq.
		 */
		if (test_and_clear_bit(STANDBY, &con->state))
			con->connect_seq++;

		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_skip err %d\n", ret);
			goto done;
		}
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto done;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto done;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_write done on %p\n", con);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	struct ceph_messenger *msgr;
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);
	msgr = con->msgr;

	mutex_lock(&con->mutex);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto done;
			if (process_banner(con) < 0) {
				ret = -1;
				goto out;
			}
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto done;
		if (process_connect(con) < 0) {
			ret = -1;
			goto out;
		}
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);
		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto done;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto done;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto done;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				goto out;
			case -EIO:
				con->error_msg = "io error";
				goto out;
			default:
				goto done;
			}
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto done;
		process_ack(con);
		goto more;
	}

done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_read done on %p\n", con);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	set_bit(QUEUED, &con->state);
	if (test_bit(BUSY, &con->state)) {
		dout("queue_con %p - already BUSY\n", con);
		con->ops->put(con);
	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
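/*
 * Illustrative model (not part of this file) of the QUEUED/BUSY dance
 * described above: QUEUED records "there is work", BUSY guarantees a
 * single runner, and a finishing runner re-checks QUEUED so a request
 * that races with completion is never lost.  C11 atomics stand in for
 * the kernel bitops, and the worker runs inline here rather than being
 * dispatched to a workqueue; all names are made up.  Compiled out.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queued, busy;

static void do_work_once(void);           /* hypothetical payload */

static void worker(void)
{
more:
	if (atomic_exchange(&busy, true))
		return;                    /* someone else is the runner */
	atomic_store(&queued, false);
	do_work_once();
	atomic_store(&busy, false);
	if (atomic_load(&queued))
		goto more;                 /* a request raced in: loop */
}

static void queue_request(void)
{
	atomic_store(&queued, true);       /* set QUEUED first... */
	if (!atomic_load(&busy))
		worker();                  /* ...then try to run */
}
#endif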
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int backoff = 0;

more:
	if (test_and_set_bit(BUSY, &con->state) != 0) {
		dout("con_work %p BUSY already set\n", con);
		goto out;
	}
	dout("con_work %p start, clearing QUEUED\n", con);
	clear_bit(QUEUED, &con->state);

	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
	    try_read(con) < 0 ||
	    try_write(con) < 0) {
		backoff = 1;
		ceph_fault(con);     /* error/fault path */
	}

done:
	clear_bit(BUSY, &con->state);
	dout("con->state=%lu\n", con->state);
	if (test_bit(QUEUED, &con->state)) {
		if (!backoff || test_bit(OPENING, &con->state)) {
			dout("con_work %p QUEUED reset, looping\n", con);
			goto more;
		}
		dout("con_work %p QUEUED reset, but just faulted\n", con);
		clear_bit(QUEUED, &con->state);
	}
	dout("con_work %p done\n", con);

out:
	con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages in the queue, place the connection
	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
		dout("fault setting STANDBY\n");
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		dout("fault queueing %p delay %lu\n", con, con->delay);
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay)) == 0)
			con->ops->put(con);
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
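/*
 * Illustrative sketch (not part of this file) of the backoff schedule
 * above: the delay starts at BASE_DELAY_INTERVAL and doubles on each
 * consecutive fault until it is capped at MAX_DELAY_INTERVAL.  The
 * numeric values below are made up for illustration.  Compiled out.
 */
#if 0
static unsigned long next_delay(unsigned long delay)
{
	const unsigned long base = 1, max = 64;   /* illustrative units */

	if (delay == 0)
		return base;
	if (delay < max)
		return delay * 2;
	return delay;          /* capped: 1, 2, 4, ..., 64, 64, ... */
}
#endif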
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src.name = con->msgr->inst.name;
	msg->hdr.src.addr = con->msgr->my_enc_addr;
	msg->hdr.orig_src = msg->hdr.src;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
		if (con->out_msg == msg) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;
		}
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
	} else {
		dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len,
			      int page_len, int page_off, struct page **pages)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), GFP_NOFS);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.type = cpu_to_le16(type);
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = cpu_to_le32(page_len);
	m->hdr.data_off = cpu_to_le16(page_off);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, GFP_NOFS);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = calc_pages_for(page_off, page_len);
	m->pages = pages;
	m->pagelist = NULL;

	dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
	     m->nr_pages);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d len %d\n", type, front_len);
	return ERR_PTR(-ENOMEM);
}
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (IS_ERR(msg))
			return msg;

		if (*skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, 0, 0, NULL);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return ERR_PTR(-ENOMEM);
		}
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len) {
		ret = ceph_alloc_middle(con, msg);

		if (ret < 0) {
			ceph_msg_put(msg);
			return ERR_PTR(ret);  /* don't return the freed msg */
		}
	}

	return msg;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}