// SPDX-License-Identifier: GPL-2.0
/*
 * Ceph msgr2 protocol implementation
 *
 * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
 */

#include <linux/ceph/ceph_debug.h>

#include <crypto/aead.h>
#include <crypto/algapi.h>  /* for crypto_memneq() */
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/sched/mm.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>

#include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */

#define FRAME_TAG_HELLO			1
#define FRAME_TAG_AUTH_REQUEST		2
#define FRAME_TAG_AUTH_BAD_METHOD	3
#define FRAME_TAG_AUTH_REPLY_MORE	4
#define FRAME_TAG_AUTH_REQUEST_MORE	5
#define FRAME_TAG_AUTH_DONE		6
#define FRAME_TAG_AUTH_SIGNATURE	7
#define FRAME_TAG_CLIENT_IDENT		8
#define FRAME_TAG_SERVER_IDENT		9
#define FRAME_TAG_IDENT_MISSING_FEATURES 10
#define FRAME_TAG_SESSION_RECONNECT	11
#define FRAME_TAG_SESSION_RESET		12
#define FRAME_TAG_SESSION_RETRY		13
#define FRAME_TAG_SESSION_RETRY_GLOBAL	14
#define FRAME_TAG_SESSION_RECONNECT_OK	15
#define FRAME_TAG_WAIT			16
#define FRAME_TAG_MESSAGE		17
#define FRAME_TAG_KEEPALIVE2		18
#define FRAME_TAG_KEEPALIVE2_ACK	19
#define FRAME_TAG_ACK			20

#define FRAME_LATE_STATUS_ABORTED	0x1
#define FRAME_LATE_STATUS_COMPLETE	0xe
#define FRAME_LATE_STATUS_ABORTED_MASK	0xf

#define IN_S_HANDLE_PREAMBLE		1
#define IN_S_HANDLE_CONTROL		2
#define IN_S_HANDLE_CONTROL_REMAINDER	3
#define IN_S_PREPARE_READ_DATA		4
#define IN_S_PREPARE_READ_DATA_CONT	5
#define IN_S_HANDLE_EPILOGUE		6
#define IN_S_FINISH_SKIP		7

#define OUT_S_QUEUE_DATA		1
#define OUT_S_QUEUE_DATA_CONT		2
#define OUT_S_QUEUE_ENC_PAGE		3
#define OUT_S_QUEUE_ZEROS		4
#define OUT_S_FINISH_MESSAGE		5
#define OUT_S_GET_NEXT			6

#define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
#define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
#define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)

#define CEPH_MSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
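
/*
 * in_buf/out_buf layout implied by the macros above: the control body
 * of a frame starts CEPH_PREAMBLE_LEN bytes into the buffer, and for
 * secure connections the area past the secure epilogue doubles as
 * scratch space for per-segment zero padding -- FRONT_PAD/MIDDLE_PAD/
 * DATA_PAD each point at a CEPH_GCM_BLOCK_LEN sized pad slot.
 */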

static int do_recvmsg(struct socket *sock, struct iov_iter *it)
{
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
	int ret;

	msg.msg_iter = *it;
	while (iov_iter_count(it)) {
		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
		if (ret <= 0) {
			if (ret == -EAGAIN)
				ret = 0;
			return ret;
		}

		iov_iter_advance(it, ret);
	}

	WARN_ON(msg_data_left(&msg));
	return 1;
}

/*
 * Read as much as possible.
 *
 * Return:
 *   1 - done, nothing (else) to read
 *   0 - socket is empty, need to wait
 *  <0 - error
 */
static int ceph_tcp_recv(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p %s %zu\n", __func__, con,
	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
	     iov_iter_count(&con->v2.in_iter));
	ret = do_recvmsg(con->sock, &con->v2.in_iter);
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.in_iter));
	return ret;
}

static int do_sendmsg(struct socket *sock, struct iov_iter *it)
{
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
	int ret;

	msg.msg_iter = *it;
	while (iov_iter_count(it)) {
		ret = sock_sendmsg(sock, &msg);
		if (ret <= 0) {
			if (ret == -EAGAIN)
				ret = 0;
			return ret;
		}

		iov_iter_advance(it, ret);
	}

	WARN_ON(msg_data_left(&msg));
	return 1;
}

static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
{
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
	struct bio_vec bv;
	int ret;

	if (WARN_ON(!iov_iter_is_bvec(it)))
		return -EINVAL;

	while (iov_iter_count(it)) {
		/* iov_iter_iovec() for ITER_BVEC */
		bv.bv_page = it->bvec->bv_page;
		bv.bv_offset = it->bvec->bv_offset + it->iov_offset;
		bv.bv_len = min(iov_iter_count(it),
				it->bvec->bv_len - it->iov_offset);

		/*
		 * sendpage cannot properly handle pages with
		 * page_count == 0, we need to fall back to sendmsg if
		 * that's the case.
		 *
		 * Same goes for slab pages: skb_can_coalesce() allows
		 * coalescing neighboring slab objects into a single frag
		 * which triggers one of hardened usercopy checks.
		 */
		if (sendpage_ok(bv.bv_page)) {
			ret = sock->ops->sendpage(sock, bv.bv_page,
						  bv.bv_offset, bv.bv_len,
						  CEPH_MSG_FLAGS);
		} else {
			iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);
			ret = sock_sendmsg(sock, &msg);
		}
		if (ret <= 0) {
			if (ret == -EAGAIN)
				ret = 0;
			return ret;
		}

		iov_iter_advance(it, ret);
	}

	return 1;
}

/*
 * Write as much as possible.  The socket is expected to be corked,
 * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here.
 *
 * Return:
 *   1 - done, nothing (else) to write
 *   0 - socket is full, need to wait
 *  <0 - error
 */
static int ceph_tcp_send(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
	if (con->v2.out_iter_sendpage)
		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
	else
		ret = do_sendmsg(con->sock, &con->v2.out_iter);
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.out_iter));
	return ret;
}

static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));

	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
	con->v2.in_kvec_cnt++;

	con->v2.in_iter.nr_segs++;
	con->v2.in_iter.count += len;
}

static void reset_in_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_kvec_cnt = 0;
	iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
}

static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_bvec = *bv;
	iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
}

static void set_in_skip(struct ceph_connection *con, int len)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	dout("%s con %p len %d\n", __func__, con, len);
	iov_iter_discard(&con->v2.in_iter, READ, len);
}
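
/*
 * Typical usage of the in_* helpers by the prepare_read_*() functions
 * further down: reset_in_kvecs() + add_in_kvec() to read into
 * connection buffers, set_in_bvec() to read message data directly into
 * its pages, and set_in_skip() to discard the tail of a frame that is
 * being skipped.
 */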

static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
	con->v2.out_kvec_cnt++;

	con->v2.out_iter.nr_segs++;
	con->v2.out_iter.count += len;
}

static void reset_out_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvec_cnt = 0;

	iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
	con->v2.out_iter_sendpage = false;
}

static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
			 bool zerocopy)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_bvec = *bv;
	con->v2.out_iter_sendpage = zerocopy;
	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}

static void set_out_bvec_zero(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(!con->v2.out_zero);

	con->v2.out_bvec.bv_page = ceph_zero_page;
	con->v2.out_bvec.bv_offset = 0;
	con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
	con->v2.out_iter_sendpage = true;
	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}

static void out_zero_add(struct ceph_connection *con, int len)
{
	dout("%s con %p len %d\n", __func__, con, len);
	con->v2.out_zero += len;
}

static void *alloc_conn_buf(struct ceph_connection *con, int len)
{
	void *buf;

	dout("%s con %p len %d\n", __func__, con, len);

	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
		return NULL;

	buf = ceph_kvmalloc(len, GFP_NOIO);
	if (!buf)
		return NULL;

	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
	return buf;
}

static void free_conn_bufs(struct ceph_connection *con)
{
	while (con->v2.conn_buf_cnt)
		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
}

static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));

	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
	con->v2.in_sign_kvec_cnt++;
}

static void clear_in_sign_kvecs(struct ceph_connection *con)
{
	con->v2.in_sign_kvec_cnt = 0;
}

static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));

	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
	con->v2.out_sign_kvec_cnt++;
}

static void clear_out_sign_kvecs(struct ceph_connection *con)
{
	con->v2.out_sign_kvec_cnt = 0;
}

static bool con_secure(struct ceph_connection *con)
{
	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
}

static int front_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.front_len);
}

static int middle_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.middle_len);
}

static int data_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.data_len);
}

static bool need_padding(int len)
{
	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
}

static int padded_len(int len)
{
	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
}

static int padding_len(int len)
{
	return padded_len(len) - len;
}

/* preamble + control segment */
static int head_onwire_len(int ctrl_len, bool secure)
{
	int head_len;
	int rem_len;

	if (secure) {
		head_len = CEPH_PREAMBLE_SECURE_LEN;
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
		}
	} else {
		head_len = CEPH_PREAMBLE_PLAIN_LEN;
		head_len += ctrl_len + CEPH_CRC_LEN;
	}
	return head_len;
}

/* front, middle and data segments + epilogue */
static int __tail_onwire_len(int front_len, int middle_len, int data_len,
			     bool secure)
{
	if (!front_len && !middle_len && !data_len)
		return 0;

	if (!secure)
		return front_len + middle_len + data_len +
		       CEPH_EPILOGUE_PLAIN_LEN;

	return padded_len(front_len) + padded_len(middle_len) +
	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
}

static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
{
	return __tail_onwire_len(front_len(msg), middle_len(msg),
				 data_len(msg), secure);
}
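
/*
 * Example (plain mode): for a control-only frame the on-wire head is
 * preamble + control body + crc, i.e. head_onwire_len(ctrl_len, false)
 * == CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN.  This is what
 * the AUTH_BUF_LEN definition below relies on to end up with a 512-byte
 * auth buffer.  The values of the CEPH_* length constants live in
 * messenger.h.
 */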

/* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
#define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
				 sizeof(struct ceph_msg_header2) +	\
				 CEPH_CRC_LEN)

static const int frame_aligns[] = {
	sizeof(void *),
	sizeof(void *),
	sizeof(void *),
	PAGE_SIZE
};

/*
 * Discards trailing empty segments, unless there is just one segment.
 * A frame always has at least one (possibly empty) segment.
 */
static int calc_segment_count(const int *lens, int len_cnt)
{
	int i;

	for (i = len_cnt - 1; i >= 0; i--) {
		if (lens[i])
			return i + 1;
	}

	return 1;
}

static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
			    const int *lens, int len_cnt)
{
	int i;

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = tag;
	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = lens[i];
		desc->fd_aligns[i] = frame_aligns[i];
	}
}

/*
 * Preamble crc covers everything up to itself (28 bytes) and
 * is calculated and verified irrespective of the connection mode
 * (i.e. even if the frame is encrypted).
 */
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
{
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	void *start = p;
	int i;

	memset(p, 0, CEPH_PREAMBLE_LEN);

	ceph_encode_8(&p, desc->fd_tag);
	ceph_encode_8(&p, desc->fd_seg_cnt);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		ceph_encode_32(&p, desc->fd_lens[i]);
		ceph_encode_16(&p, desc->fd_aligns[i]);
	}

	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
}
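
/*
 * As encoded above, the preamble is: tag (1 byte), segment count
 * (1 byte), then a (length, alignment) pair per segment (unused slots
 * stay zeroed), with the preamble crc in the last CEPH_CRC_LEN bytes of
 * the CEPH_PREAMBLE_LEN buffer.  decode_preamble() below is the exact
 * inverse.
 */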

static int decode_preamble(void *p, struct ceph_frame_desc *desc)
{
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	u32 crc, expected_crc;
	int i;

	crc = crc32c(0, p, crcp - p);
	expected_crc = get_unaligned_le32(crcp);
	if (crc != expected_crc) {
		pr_err("bad preamble crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = ceph_decode_8(&p);
	desc->fd_seg_cnt = ceph_decode_8(&p);
	if (desc->fd_seg_cnt < 1 ||
	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
		return -EINVAL;
	}
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = ceph_decode_32(&p);
		desc->fd_aligns[i] = ceph_decode_16(&p);
	}

	/*
	 * This would fire for FRAME_TAG_WAIT (it has one empty
	 * segment), but we should never get it as client.
	 */
	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
		pr_err("last segment empty\n");
		return -EINVAL;
	}

	if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
		pr_err("control segment too big %d\n", desc->fd_lens[0]);
		return -EINVAL;
	}
	if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
		pr_err("front segment too big %d\n", desc->fd_lens[1]);
		return -EINVAL;
	}
	if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
		pr_err("middle segment too big %d\n", desc->fd_lens[2]);
		return -EINVAL;
	}
	if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
		pr_err("data segment too big %d\n", desc->fd_lens[3]);
		return -EINVAL;
	}

	return 0;
}

static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
{
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
	cpu_to_le32s(&con->v2.out_epil.front_crc);
	cpu_to_le32s(&con->v2.out_epil.middle_crc);
	cpu_to_le32s(&con->v2.out_epil.data_crc);
}

static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
{
	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
}
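
/*
 * Plain epilogue layout, matching decode_epilogue() below: late_status
 * (1 byte) followed by front, middle and data crcs (3 x 4 bytes).  The
 * secure epilogue carries only late_status -- the crcs are unused there
 * because integrity is provided by the GCM auth tags.
 */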

static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
			   u32 *data_crc)
{
	u8 late_status;

	late_status = ceph_decode_8(&p);
	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
			FRAME_LATE_STATUS_COMPLETE) {
		/* we should never get an aborted message as client */
		pr_err("bad late_status 0x%x\n", late_status);
		return -EINVAL;
	}

	if (front_crc && middle_crc && data_crc) {
		*front_crc = ceph_decode_32(&p);
		*middle_crc = ceph_decode_32(&p);
		*data_crc = ceph_decode_32(&p);
	}

	return 0;
}

static void fill_header(struct ceph_msg_header *hdr,
			const struct ceph_msg_header2 *hdr2,
			int front_len, int middle_len, int data_len,
			const struct ceph_entity_name *peer_name)
{
	hdr->seq = hdr2->seq;
	hdr->tid = hdr2->tid;
	hdr->type = hdr2->type;
	hdr->priority = hdr2->priority;
	hdr->version = hdr2->version;
	hdr->front_len = cpu_to_le32(front_len);
	hdr->middle_len = cpu_to_le32(middle_len);
	hdr->data_len = cpu_to_le32(data_len);
	hdr->data_off = hdr2->data_off;
	hdr->src = *peer_name;
	hdr->compat_version = hdr2->compat_version;
	hdr->reserved = 0;
	hdr->crc = 0;
}

static void fill_header2(struct ceph_msg_header2 *hdr2,
			 const struct ceph_msg_header *hdr, u64 ack_seq)
{
	hdr2->seq = hdr->seq;
	hdr2->tid = hdr->tid;
	hdr2->type = hdr->type;
	hdr2->priority = hdr->priority;
	hdr2->version = hdr->version;
	hdr2->data_pre_padding_len = 0;
	hdr2->data_off = hdr->data_off;
	hdr2->ack_seq = cpu_to_le64(ack_seq);
	hdr2->flags = 0;
	hdr2->compat_version = hdr->compat_version;
	hdr2->reserved = 0;
}
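
/*
 * ceph_msg_header2 is the msgr2 on-wire header: compared to
 * ceph_msg_header it drops src and the front/middle/data lengths
 * (those are carried by the frame segments) and adds ack_seq and
 * data_pre_padding_len.  fill_header() above reconstructs a msgr1-style
 * header for the upper layers from header2 plus the segment lengths and
 * the peer name.
 */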

static int verify_control_crc(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	u32 crc, expected_crc;

	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);

	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
	if (crc != expected_crc) {
		pr_err("bad control crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	return 0;
}

static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
				u32 middle_crc, u32 data_crc)
{
	if (front_len(con->in_msg)) {
		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
					   front_len(con->in_msg));
	} else {
		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
		con->in_front_crc = -1;
	}

	if (middle_len(con->in_msg))
		con->in_middle_crc = crc32c(-1,
					    con->in_msg->middle->vec.iov_base,
					    middle_len(con->in_msg));
	else if (data_len(con->in_msg))
		con->in_middle_crc = -1;
	else
		con->in_middle_crc = 0;

	if (!data_len(con->in_msg))
		con->in_data_crc = 0;

	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);

	if (con->in_front_crc != front_crc) {
		pr_err("bad front crc, calculated %u, expected %u\n",
		       con->in_front_crc, front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != middle_crc) {
		pr_err("bad middle crc, calculated %u, expected %u\n",
		       con->in_middle_crc, middle_crc);
		return -EBADMSG;
	}
	if (con->in_data_crc != data_crc) {
		pr_err("bad data crc, calculated %u, expected %u\n",
		       con->in_data_crc, data_crc);
		return -EBADMSG;
	}

	return 0;
}

static int setup_crypto(struct ceph_connection *con,
			u8 *session_key, int session_key_len,
			u8 *con_secret, int con_secret_len)
{
	unsigned int noio_flag;
	void *p;
	int ret;

	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
	WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);

	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
		pr_err("bad con_mode %d\n", con->v2.con_mode);
		return -EINVAL;
	}

	if (!session_key_len) {
		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
		WARN_ON(con_secret_len);
		return 0;  /* auth_none */
	}

	noio_flag = memalloc_noio_save();
	con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.hmac_tfm)) {
		ret = PTR_ERR(con->v2.hmac_tfm);
		con->v2.hmac_tfm = NULL;
		pr_err("failed to allocate hmac tfm context: %d\n", ret);
		return ret;
	}

	WARN_ON((unsigned long)session_key &
		crypto_shash_alignmask(con->v2.hmac_tfm));
	ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
				  session_key_len);
	if (ret) {
		pr_err("failed to set hmac key: %d\n", ret);
		return ret;
	}

	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
		WARN_ON(con_secret_len);
		return 0;  /* auth_x, plain mode */
	}

	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
		pr_err("con_secret too small %d\n", con_secret_len);
		return -EINVAL;
	}

	noio_flag = memalloc_noio_save();
	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.gcm_tfm)) {
		ret = PTR_ERR(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;
		pr_err("failed to allocate gcm tfm context: %d\n", ret);
		return ret;
	}

	p = con_secret;
	WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
	ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
	if (ret) {
		pr_err("failed to set gcm key: %d\n", ret);
		return ret;
	}

	p += CEPH_GCM_KEY_LEN;
	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
	if (ret) {
		pr_err("failed to set gcm tag size: %d\n", ret);
		return ret;
	}

	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
	if (!con->v2.gcm_req) {
		pr_err("failed to allocate gcm request\n");
		return -ENOMEM;
	}

	crypto_init_wait(&con->v2.gcm_wait);
	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &con->v2.gcm_wait);

	memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
	memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
	return 0;  /* auth_x, secure mode */
}
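
/*
 * As consumed above, con_secret is laid out as the AES-GCM key
 * (CEPH_GCM_KEY_LEN bytes) followed by the initial rx nonce and then
 * the initial tx nonce (CEPH_GCM_IV_LEN bytes each) -- hence the
 * CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN minimum length check.
 */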

static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
		       int kvec_cnt, u8 *hmac)
{
	SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm);  /* tfm arg is ignored */
	int ret;
	int i;

	dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
	     con->v2.hmac_tfm, kvec_cnt);

	if (!con->v2.hmac_tfm) {
		memset(hmac, 0, SHA256_DIGEST_SIZE);
		return 0;  /* auth_none */
	}

	desc->tfm = con->v2.hmac_tfm;
	ret = crypto_shash_init(desc);
	if (ret)
		return ret;

	for (i = 0; i < kvec_cnt; i++) {
		WARN_ON((unsigned long)kvecs[i].iov_base &
			crypto_shash_alignmask(con->v2.hmac_tfm));
		ret = crypto_shash_update(desc, kvecs[i].iov_base,
					  kvecs[i].iov_len);
		if (ret)
			return ret;
	}

	ret = crypto_shash_final(desc, hmac);
	if (ret)
		return ret;

	shash_desc_zero(desc);
	return 0;  /* auth_x, both plain and secure modes */
}
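
/*
 * The auth signature exchanged at the end of the handshake is an
 * HMAC-SHA256 (keyed with the session key set up in setup_crypto())
 * over the kvecs accumulated in in_sign_kvecs/out_sign_kvecs, i.e. over
 * the raw bytes of the pre-auth frames as they went over the wire.
 * With auth_none there is no session key and the signature is all
 * zeros.
 */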

static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
{
	u64 counter;

	counter = le64_to_cpu(nonce->counter);
	nonce->counter = cpu_to_le64(counter + 1);
}

static int gcm_crypt(struct ceph_connection *con, bool encrypt,
		     struct scatterlist *src, struct scatterlist *dst,
		     int src_len)
{
	struct ceph_gcm_nonce *nonce;
	int ret;

	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;

	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
					crypto_aead_decrypt(con->v2.gcm_req),
			      &con->v2.gcm_wait);
	if (ret)
		return ret;

	gcm_inc_nonce(nonce);
	return 0;
}
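
/*
 * Note that rx and tx directions use separate nonces (in_gcm_nonce and
 * out_gcm_nonce, seeded from con_secret in setup_crypto()) and that the
 * counter part of the nonce is bumped only after a successful
 * operation, keeping both sides in lockstep.
 */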

static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
			struct bio_vec *bv)
{
	struct page *page;
	size_t off, len;

	WARN_ON(!cursor->total_resid);

	/* skip zero-length data items */
	while (!cursor->resid)
		ceph_msg_data_advance(cursor, 0);

	/* get a piece of data, cursor isn't advanced */
	page = ceph_msg_data_next(cursor, &off, &len, NULL);

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;
}

static int calc_sg_cnt(void *buf, int buf_len)
{
	int sg_cnt;

	if (!buf_len)
		return 0;

	sg_cnt = need_padding(buf_len) ? 1 : 0;
	if (is_vmalloc_addr(buf)) {
		WARN_ON(offset_in_page(buf));
		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
	} else {
		sg_cnt++;
	}

	return sg_cnt;
}

static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;
	int sg_cnt;

	if (!data_len)
		return 0;

	sg_cnt = need_padding(data_len) ? 1 : 0;
	do {
		get_bvec_at(cursor, &bv);
		sg_cnt++;

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	return sg_cnt;
}

static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
{
	void *end = buf + buf_len;
	struct page *page;
	int len;
	void *p;

	if (!buf_len)
		return;

	if (is_vmalloc_addr(buf)) {
		p = buf;
		do {
			page = vmalloc_to_page(p);
			len = min_t(int, end - p, PAGE_SIZE);
			WARN_ON(!page || !len || offset_in_page(p));
			sg_set_page(*sg, page, len, 0);
			*sg = sg_next(*sg);
			p += len;
		} while (p != end);
	} else {
		sg_set_buf(*sg, buf, buf_len);
		*sg = sg_next(*sg);
	}

	if (need_padding(buf_len)) {
		sg_set_buf(*sg, pad, padding_len(buf_len));
		*sg = sg_next(*sg);
	}
}

static void init_sgs_cursor(struct scatterlist **sg,
			    struct ceph_msg_data_cursor *cursor, u8 *pad)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;

	if (!data_len)
		return;

	do {
		get_bvec_at(cursor, &bv);
		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		*sg = sg_next(*sg);

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	if (need_padding(data_len)) {
		sg_set_buf(*sg, pad, padding_len(data_len));
		*sg = sg_next(*sg);
	}
}

static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
			     void *epilogue, bool add_tag)
{
	struct ceph_msg_data_cursor cursor;
	struct scatterlist *cur_sg;
	int sg_cnt;
	int ret;

	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
		return 0;

	sg_cnt = 1; /* epilogue + [auth tag] */
	if (front_len(msg))
		sg_cnt += calc_sg_cnt(msg->front.iov_base,
				      front_len(msg));
	if (middle_len(msg))
		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
				      middle_len(msg));
	if (data_len(msg)) {
		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
		sg_cnt += calc_sg_cnt_cursor(&cursor);
	}

	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
	if (ret)
		return ret;

	cur_sg = sgt->sgl;
	if (front_len(msg))
		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
			 front_pad);
	if (middle_len(msg))
		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
			 middle_pad);
	if (data_len(msg)) {
		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
		init_sgs_cursor(&cur_sg, &cursor, data_pad);
	}

	WARN_ON(!sg_is_last(cur_sg));
	sg_set_buf(cur_sg, epilogue,
		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
	return 0;
}
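
/*
 * For a secure message the resulting sg list covers, in order: the
 * front, middle and data segments (each followed by its zero padding to
 * CEPH_GCM_BLOCK_LEN, if needed) and finally the epilogue, with room
 * for the auth tag when add_tag is set.  This mirrors
 * __tail_onwire_len() above.
 */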

static int decrypt_preamble(struct ceph_connection *con)
{
	struct scatterlist sg;

	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
}

static int decrypt_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];

	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);

	return gcm_crypt(con, false, sgs, sgs,
			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}

static int decrypt_message(struct ceph_connection *con)
{
	struct sg_table sgt = {};
	int ret;

	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
				MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
				con->v2.in_buf, true);
	if (ret)
		goto out;

	ret = gcm_crypt(con, false, sgt.sgl, sgt.sgl,
			tail_onwire_len(con->in_msg, true));

out:
	sg_free_table(&sgt);
	return ret;
}

static int prepare_banner(struct ceph_connection *con)
{
	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
	void *buf, *p;

	buf = alloc_conn_buf(con, buf_len);
	if (!buf)
		return -ENOMEM;

	p = buf;
	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
	WARN_ON(p != buf + buf_len);

	add_out_kvec(con, buf, buf_len);
	add_out_sign_kvec(con, buf, buf_len);
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

/*
 * Base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for control crc
 *
 * extdata (optional):
 *   control body (extdata_len bytes)
 *
 * Compute control crc and gather base and extdata into:
 *
 *   preamble
 *   control body (ctrl_len + extdata_len bytes)
 *   control crc
 *
 * Preamble should already be encoded at the start of base.
 */
static void prepare_head_plain(struct ceph_connection *con, void *base,
			       int ctrl_len, void *extdata, int extdata_len,
			       bool to_be_signed)
{
	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
	void *crcp = base + base_len - CEPH_CRC_LEN;
	u32 crc;

	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
	if (extdata_len)
		crc = crc32c(crc, extdata, extdata_len);
	put_unaligned_le32(crc, crcp);

	if (!extdata_len) {
		add_out_kvec(con, base, base_len);
		if (to_be_signed)
			add_out_sign_kvec(con, base, base_len);
		return;
	}

	add_out_kvec(con, base, crcp - base);
	add_out_kvec(con, extdata, extdata_len);
	add_out_kvec(con, crcp, CEPH_CRC_LEN);
	if (to_be_signed) {
		add_out_sign_kvec(con, base, crcp - base);
		add_out_sign_kvec(con, extdata, extdata_len);
		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
	}
}

static int prepare_head_secure_small(struct ceph_connection *con,
				     void *base, int ctrl_len)
{
	struct scatterlist sg;
	int ret;

	/* inline buffer padding? */
	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
		memset(CTRL_BODY(base) + ctrl_len, 0,
		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);

	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
	ret = gcm_crypt(con, true, &sg, &sg,
			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
	if (ret)
		return ret;

	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
	return 0;
}

/*
 * Base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for padding, if needed
 *   space for control remainder auth tag
 *   space for preamble auth tag
 *
 * Encrypt preamble and the inline portion, then encrypt the remainder
 * and gather into:
 *
 *   preamble
 *   control body (48 bytes)
 *   preamble auth tag
 *   control body (ctrl_len - 48 bytes)
 *   zero padding, if needed
 *   control remainder auth tag
 *
 * Preamble should already be encoded at the start of base.
 */
static int prepare_head_secure_big(struct ceph_connection *con,
				   void *base, int ctrl_len)
{
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
	void *rem_tag = rem + padded_len(rem_len);
	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];
	int ret;

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], base, rem - base);
	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
	if (ret)
		return ret;

	/* control remainder padding? */
	if (need_padding(rem_len))
		memset(rem + rem_len, 0, padding_len(rem_len));

	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
	if (ret)
		return ret;

	add_out_kvec(con, base, rem - base);
	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
	add_out_kvec(con, rem, pmbl_tag - rem);
	return 0;
}

static int __prepare_control(struct ceph_connection *con, int tag,
			     void *base, int ctrl_len, void *extdata,
			     int extdata_len, bool to_be_signed)
{
	int total_len = ctrl_len + extdata_len;
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
	     total_len, ctrl_len, extdata_len);

	/* extdata may be vmalloc'ed but not base */
	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
		return -EINVAL;

	init_frame_desc(&desc, tag, &total_len, 1);
	encode_preamble(&desc, base);

	if (con_secure(con)) {
		if (WARN_ON(extdata_len || to_be_signed))
			return -EINVAL;

		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
			/* fully inlined, inline buffer may need padding */
			ret = prepare_head_secure_small(con, base, ctrl_len);
		else
			/* partially inlined, inline buffer is full */
			ret = prepare_head_secure_big(con, base, ctrl_len);
		if (ret)
			return ret;
	} else {
		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
				   to_be_signed);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

static int prepare_control(struct ceph_connection *con, int tag,
			   void *base, int ctrl_len)
{
	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
}

static int prepare_hello(struct ceph_connection *con)
{
	void *buf, *p;
	int ctrl_len;

	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
				 NULL, 0, true);
}

/* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
#define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)

static int prepare_auth_request(struct ceph_connection *con)
{
	void *authorizer, *authorizer_copy;
	int ctrl_len, authorizer_len;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
					 &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_HELLO) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	authorizer_copy = alloc_conn_buf(con, authorizer_len);
	if (!authorizer_copy)
		return -ENOMEM;

	memcpy(authorizer_copy, authorizer, authorizer_len);

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
				 authorizer_copy, authorizer_len, true);
}

static int prepare_auth_request_more(struct ceph_connection *con,
				     void *reply, int reply_len)
{
	int ctrl_len, authorizer_len;
	void *authorizer;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
					       CTRL_BODY(buf), &ctrl_len,
					       &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
				 ctrl_len, authorizer, authorizer_len, true);
}

static int prepare_auth_signature(struct ceph_connection *con)
{
	void *buf;
	int ret;

	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
						  con_secure(con)));
	if (!buf)
		return -ENOMEM;

	ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
			  CTRL_BODY(buf));
	if (ret)
		return ret;

	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
			       SHA256_DIGEST_SIZE);
}

static int prepare_client_ident(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_client *client = from_msgr(con->msgr);
	u64 global_id = ceph_client_gid(client);
	void *buf, *p;
	int ctrl_len;

	WARN_ON(con->v2.server_cookie);
	WARN_ON(con->v2.connect_seq);
	WARN_ON(con->v2.peer_global_seq);

	if (!con->v2.client_cookie) {
		do {
			get_random_bytes(&con->v2.client_cookie,
					 sizeof(con->v2.client_cookie));
		} while (!con->v2.client_cookie);
		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	} else {
		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	}

	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
	     global_id, con->v2.global_seq, client->supported_features,
	     client->required_features, con->v2.client_cookie);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* addrvec marker */
	ceph_encode_32(&p, 1);  /* addr_cnt */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	ceph_encode_64(&p, global_id);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, client->supported_features);
	ceph_encode_64(&p, client->required_features);
	ceph_encode_64(&p, 0);  /* flags */
	ceph_encode_64(&p, con->v2.client_cookie);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
}

static int prepare_session_reconnect(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	void *buf, *p;
	int ctrl_len;

	WARN_ON(!con->v2.client_cookie);
	WARN_ON(!con->v2.server_cookie);
	WARN_ON(!con->v2.connect_seq);
	WARN_ON(!con->v2.peer_global_seq);

	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
	     con->v2.connect_seq, con->in_seq);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
	ceph_encode_32(&p, 1);  /* my_addrs len */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_64(&p, con->v2.client_cookie);
	ceph_encode_64(&p, con->v2.server_cookie);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, con->v2.connect_seq);
	ceph_encode_64(&p, con->in_seq);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
}

static int prepare_keepalive2(struct ceph_connection *con)
{
	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
	     now.tv_nsec);

	ceph_encode_timespec64(ts, &now);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
			       sizeof(struct ceph_timespec));
}

static int prepare_ack(struct ceph_connection *con)
{
	void *p;

	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	p = CTRL_BODY(con->v2.out_buf);
	ceph_encode_64(&p, con->in_seq_acked);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}

static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
{
	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
	     con->out_msg, aborted, con->v2.out_epil.front_crc,
	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);

	encode_epilogue_plain(con, aborted);
	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
}

/*
 * For "used" empty segments, crc is -1.  For unused (trailing)
 * segments, crc is 0.
 */
static void prepare_message_plain(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;

	prepare_head_plain(con, con->v2.out_buf,
			   sizeof(struct ceph_msg_header2), NULL, 0, false);

	if (!front_len(msg) && !middle_len(msg)) {
		if (!data_len(msg)) {
			/*
			 * Empty message: once the head is written,
			 * we are done -- there is no epilogue.
			 */
			con->v2.out_state = OUT_S_FINISH_MESSAGE;
			return;
		}

		con->v2.out_epil.front_crc = -1;
		con->v2.out_epil.middle_crc = -1;
		con->v2.out_state = OUT_S_QUEUE_DATA;
		return;
	}

	if (front_len(msg)) {
		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
						    front_len(msg));
		add_out_kvec(con, msg->front.iov_base, front_len(msg));
	} else {
		/* middle (at least) is there, checked above */
		con->v2.out_epil.front_crc = -1;
	}

	if (middle_len(msg)) {
		con->v2.out_epil.middle_crc =
		    crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
	} else {
		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
	}

	if (data_len(msg)) {
		con->v2.out_state = OUT_S_QUEUE_DATA;
	} else {
		con->v2.out_epil.data_crc = 0;
		prepare_epilogue_plain(con, false);
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
	}
}

/*
 * Unfortunately the kernel crypto API doesn't support streaming
 * (piecewise) operation for AEAD algorithms, so we can't get away
 * with a fixed size buffer and a couple sgs.  Instead, we have to
 * allocate pages for the entire tail of the message (currently up
 * to ~32M) and two sgs arrays (up to ~256K each)...
 */
static int prepare_message_secure(struct ceph_connection *con)
{
	void *zerop = page_address(ceph_zero_page);
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	struct page **enc_pages;
	int enc_page_cnt;
	int tail_len;
	int ret;

	ret = prepare_head_secure_small(con, con->v2.out_buf,
					sizeof(struct ceph_msg_header2));
	if (ret)
		return ret;

	tail_len = tail_onwire_len(con->out_msg, true);
	if (!tail_len) {
		/*
		 * Empty message: once the head is written,
		 * we are done -- there is no epilogue.
		 */
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
		return 0;
	}

	encode_epilogue_secure(con, false);
	ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
				&con->v2.out_epil, false);
	if (ret)
		goto out;

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages)) {
		ret = PTR_ERR(enc_pages);
		goto out;
	}

	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
	con->v2.out_enc_pages = enc_pages;
	con->v2.out_enc_page_cnt = enc_page_cnt;
	con->v2.out_enc_resid = tail_len;
	con->v2.out_enc_i = 0;

	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
					0, tail_len, GFP_NOIO);
	if (ret)
		goto out;

	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
			tail_len - CEPH_GCM_TAG_LEN);
	if (ret)
		goto out;

	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
	     con->out_msg, sgt.orig_nents, enc_page_cnt);
	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;

out:
	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
	return ret;
}

static int prepare_message(struct ceph_connection *con)
{
	int lens[] = {
		sizeof(struct ceph_msg_header2),
		front_len(con->out_msg),
		middle_len(con->out_msg),
		data_len(con->out_msg)
	};
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
	     con->out_msg, lens[0], lens[1], lens[2], lens[3]);

	if (con->in_seq > con->in_seq_acked) {
		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
		     con->in_seq_acked, con->in_seq);
		con->in_seq_acked = con->in_seq;
	}

	reset_out_kvecs(con);
	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
	encode_preamble(&desc, con->v2.out_buf);
	fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
		     con->in_seq_acked);

	if (con_secure(con)) {
		ret = prepare_message_secure(con);
		if (ret)
			return ret;
	} else {
		prepare_message_plain(con);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

static int prepare_read_banner_prefix(struct ceph_connection *con)
{
	void *buf;

	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
	if (!buf)
		return -ENOMEM;

	reset_in_kvecs(con);
	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
	return 0;
}

static int prepare_read_banner_payload(struct ceph_connection *con,
				       int payload_len)
{
	void *buf;

	buf = alloc_conn_buf(con, payload_len);
	if (!buf)
		return -ENOMEM;

	reset_in_kvecs(con);
	add_in_kvec(con, buf, payload_len);
	add_in_sign_kvec(con, buf, payload_len);
	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
	return 0;
}

static void prepare_read_preamble(struct ceph_connection *con)
{
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf,
		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
				      CEPH_PREAMBLE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
}

static int prepare_read_control(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int head_len;
	void *buf;

	reset_in_kvecs(con);
	if (con->state == CEPH_CON_S_V2_HELLO ||
	    con->state == CEPH_CON_S_V2_AUTH) {
		head_len = head_onwire_len(ctrl_len, false);
		buf = alloc_conn_buf(con, head_len);
		if (!buf)
			return -ENOMEM;

		/* preserve preamble */
		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);

		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
		add_in_sign_kvec(con, buf, head_len);
	} else {
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			buf = alloc_conn_buf(con, ctrl_len);
			if (!buf)
				return -ENOMEM;

			add_in_kvec(con, buf, ctrl_len);
		} else {
			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
		}
		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
	}
	con->v2.in_state = IN_S_HANDLE_CONTROL;
	return 0;
}

static int prepare_read_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *buf;

	buf = alloc_conn_buf(con, ctrl_len);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);

	reset_in_kvecs(con);
	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
	add_in_kvec(con, con->v2.in_buf,
		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
	return 0;
}

static void prepare_read_data(struct ceph_connection *con)
{
	struct bio_vec bv;

	if (!con_secure(con))
		con->in_data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
				  data_len(con->in_msg));

	get_bvec_at(&con->v2.in_cursor, &bv);
	set_in_bvec(con, &bv);
	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
}

static void prepare_read_data_cont(struct ceph_connection *con)
{
	struct bio_vec bv;

	if (!con_secure(con))
		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
						    con->v2.in_bvec.bv_page,
						    con->v2.in_bvec.bv_offset,
						    con->v2.in_bvec.bv_len);

	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
	if (con->v2.in_cursor.total_resid) {
		get_bvec_at(&con->v2.in_cursor, &bv);
		set_in_bvec(con, &bv);
		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
		return;
	}

	/*
	 * We've read all data.  Prepare to read data padding (if any)
	 * and epilogue.
	 */
	reset_in_kvecs(con);
	if (con_secure(con)) {
		if (need_padding(data_len(con->in_msg)))
			add_in_kvec(con, DATA_PAD(con->v2.in_buf),
				    padding_len(data_len(con->in_msg)));
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_SECURE_LEN);
	} else {
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
	}
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}

static void __finish_skip(struct ceph_connection *con)
{
	con->in_seq++;
	prepare_read_preamble(con);
}

static void prepare_skip_message(struct ceph_connection *con)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	int tail_len;

	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
	     desc->fd_lens[2], desc->fd_lens[3]);

	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
				     desc->fd_lens[3], con_secure(con));
	if (!tail_len) {
		__finish_skip(con);
	} else {
		set_in_skip(con, tail_len);
		con->v2.in_state = IN_S_FINISH_SKIP;
	}
}
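
/*
 * Frame processing.  The client-side handshake implemented by the
 * process_*() handlers below runs banner -> hello -> auth (possibly
 * with auth_reply_more round trips) -> auth_signature -> client_ident
 * or session_reconnect, after which the connection moves to
 * CEPH_CON_S_OPEN and regular message/keepalive/ack frames flow.
 */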

static int process_banner_prefix(struct ceph_connection *con)
{
	int payload_len;
	void *p;

	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);

	p = con->v2.in_kvecs[0].iov_base;
	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
			con->error_msg = "server is speaking msgr1 protocol";
		else
			con->error_msg = "protocol error, bad banner";
		return -ENOTCONN;
	}

	p += CEPH_BANNER_V2_LEN;
	payload_len = ceph_decode_16(&p);
	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	return prepare_read_banner_payload(con, payload_len);
}

static int process_banner_payload(struct ceph_connection *con)
{
	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
	u64 server_feat, server_req_feat;
	void *p;
	int ret;

	p = con->v2.in_kvecs[0].iov_base;
	ceph_decode_64_safe(&p, end, server_feat, bad);
	ceph_decode_64_safe(&p, end, server_req_feat, bad);

	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
	     __func__, con, server_feat, server_req_feat);

	if (req_feat & ~server_feat) {
		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       server_feat, req_feat & ~server_feat);
		con->error_msg = "missing required protocol features";
		return -EHOSTUNREACH;
	}
	if (server_req_feat & ~feat) {
		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
		       feat, server_req_feat & ~feat);
		con->error_msg = "missing required protocol features";
		return -EHOSTUNREACH;
	}

	/* no reset_out_kvecs() as our banner may still be pending */
	ret = prepare_hello(con);
	if (ret) {
		pr_err("prepare_hello failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_HELLO;
	prepare_read_preamble(con);
	return 0;

bad:
	pr_err("failed to decode banner payload\n");
	return -EINVAL;
}

static int process_hello(struct ceph_connection *con, void *p, void *end)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_entity_addr addr_for_me;
	u8 entity_type;
	int ret;

	if (con->state != CEPH_CON_S_V2_HELLO) {
		con->error_msg = "protocol error, unexpected hello";
		return -EINVAL;
	}

	ceph_decode_8_safe(&p, end, entity_type, bad);
	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
	if (ret) {
		pr_err("failed to decode addr_for_me: %d\n", ret);
		return ret;
	}

	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
	     entity_type, ceph_pr_addr(&addr_for_me));

	if (entity_type != con->peer_name.type) {
		pr_err("bad peer type, want %d, got %d\n",
		       con->peer_name.type, entity_type);
		con->error_msg = "wrong peer at address";
		return -EINVAL;
	}

	/*
	 * Set our address to the address our first peer (i.e. monitor)
	 * sees that we are connecting from.  If we are behind some sort
	 * of NAT and want to be identified by some private (not NATed)
	 * address, ip option should be used.
	 */
	if (ceph_addr_is_blank(my_addr)) {
		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
		       sizeof(my_addr->in_addr));
		ceph_addr_set_port(my_addr, 0);
		dout("%s con %p set my addr %s, as seen by peer %s\n",
		     __func__, con, ceph_pr_addr(my_addr),
		     ceph_pr_addr(&con->peer_addr));
	} else {
		dout("%s con %p my addr already set %s\n",
		     __func__, con, ceph_pr_addr(my_addr));
	}

	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
	WARN_ON(!my_addr->nonce);

	/* no reset_out_kvecs() as our hello may still be pending */
	ret = prepare_auth_request(con);
	if (ret) {
		if (ret != -EAGAIN)
			pr_err("prepare_auth_request failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_AUTH;
	return 0;

bad:
	pr_err("failed to decode hello\n");
	return -EINVAL;
}

static int process_auth_bad_method(struct ceph_connection *con,
				   void *p, void *end)
{
	int allowed_protos[8], allowed_modes[8];
	int allowed_proto_cnt, allowed_mode_cnt;
	int used_proto, result;
	int ret;
	int i;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_bad_method";
		return -EINVAL;
	}

	ceph_decode_32_safe(&p, end, used_proto, bad);
	ceph_decode_32_safe(&p, end, result, bad);
	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
	     result);

	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
		return -EINVAL;
	}
	for (i = 0; i < allowed_proto_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
		     i, allowed_protos[i]);
	}

	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
		return -EINVAL;
	}
	for (i = 0; i < allowed_mode_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
		     i, allowed_modes[i]);
	}

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
					       allowed_protos,
					       allowed_proto_cnt,
					       allowed_modes,
					       allowed_mode_cnt);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
	return ret;

bad:
	pr_err("failed to decode auth_bad_method\n");
	return -EINVAL;
}

static int process_auth_reply_more(struct ceph_connection *con,
				   void *p, void *end)
{
	int payload_len;
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_reply_more";
		return -EINVAL;
	}

	ceph_decode_32_safe(&p, end, payload_len, bad);
	ceph_decode_need(&p, end, payload_len, bad);

	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	reset_out_kvecs(con);
	ret = prepare_auth_request_more(con, p, payload_len);
	if (ret) {
		if (ret != -EAGAIN)
			pr_err("prepare_auth_request_more failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode auth_reply_more\n");
	return -EINVAL;
}

/*
 * Align session_key and con_secret to avoid GFP_ATOMIC allocation
 * inside crypto_shash_setkey() and crypto_aead_setkey() called from
 * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
 * objects, so do it by hand.
 */
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
{
	u8 session_key_buf[CEPH_KEY_LEN + 16];
	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
	int session_key_len, con_secret_len;
	int payload_len;
	u64 global_id;
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_done";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
	ceph_decode_32_safe(&p, end, payload_len, bad);

	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
	     __func__, con, global_id, con->v2.con_mode, payload_len);

	mutex_unlock(&con->mutex);
	session_key_len = 0;
	con_secret_len = 0;
	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
					 session_key, &session_key_len,
					 con_secret, &con_secret_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	ret = setup_crypto(con, session_key, session_key_len, con_secret,
			   con_secret_len);
	if (ret)
		return ret;

	reset_out_kvecs(con);
	ret = prepare_auth_signature(con);
	if (ret) {
		pr_err("prepare_auth_signature failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
	return 0;

bad:
	pr_err("failed to decode auth_done\n");
	return -EINVAL;
}

static int process_auth_signature(struct ceph_connection *con,
				  void *p, void *end)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	int ret;

	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
		con->error_msg = "protocol error, unexpected auth_signature";
		return -EINVAL;
	}

	ret = hmac_sha256(con, con->v2.out_sign_kvecs,
			  con->v2.out_sign_kvec_cnt, hmac);
	if (ret)
		return ret;

	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
		con->error_msg = "integrity error, bad auth signature";
		return -EBADMSG;
	}

	dout("%s con %p auth signature ok\n", __func__, con);

	/* no reset_out_kvecs() as our auth_signature may still be pending */
	if (!con->v2.server_cookie) {
		ret = prepare_client_ident(con);
		if (ret) {
			pr_err("prepare_client_ident failed: %d\n", ret);
			return ret;
		}

		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
	} else {
		ret = prepare_session_reconnect(con);
		if (ret) {
			pr_err("prepare_session_reconnect failed: %d\n", ret);
			return ret;
		}

		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
	}

	return 0;

bad:
	pr_err("failed to decode auth_signature\n");
	return -EINVAL;
}
static int process_server_ident(struct ceph_connection *con,
				void *p, void *end)
{
	struct ceph_client *client = from_msgr(con->msgr);
	u64 features, required_features;
	struct ceph_entity_addr addr;
	u64 global_seq;
	u64 global_id;
	u64 cookie;
	u64 flags;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
		con->error_msg = "protocol error, unexpected server_ident";
		return -EINVAL;
	}

	ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
	if (ret) {
		pr_err("failed to decode server addrs: %d\n", ret);
		return ret;
	}

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_64_safe(&p, end, global_seq, bad);
	ceph_decode_64_safe(&p, end, features, bad);
	ceph_decode_64_safe(&p, end, required_features, bad);
	ceph_decode_64_safe(&p, end, flags, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);

	dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
	     global_id, global_seq, features, required_features, flags, cookie);

	/* is this who we intended to talk to? */
	if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
		pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
		       ceph_pr_addr(&con->peer_addr),
		       le32_to_cpu(con->peer_addr.nonce),
		       ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
		con->error_msg = "wrong peer at address";
		return -ENOENT;
	}

	if (client->required_features & ~features) {
		pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       features, client->required_features & ~features);
		con->error_msg = "missing required protocol features";
		return -ENOENT;
	}

	/*
	 * Both name->type and name->num are set in ceph_con_open() but
	 * name->num may be bogus in the initial monmap.  name->type is
	 * verified in handle_hello().
	 */
	WARN_ON(!con->peer_name.type);
	con->peer_name.num = cpu_to_le64(global_id);
	con->v2.peer_global_seq = global_seq;
	con->peer_features = features;
	WARN_ON(required_features & ~client->supported_features);
	con->v2.server_cookie = cookie;

	if (flags & CEPH_MSG_CONNECT_LOSSY) {
		ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
		WARN_ON(con->v2.server_cookie);
	} else {
		WARN_ON(!con->v2.server_cookie);
	}

	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);
	con->delay = 0;  /* reset backoff memory */

	con->state = CEPH_CON_S_OPEN;
	con->v2.out_state = OUT_S_GET_NEXT;
	return 0;

bad:
	pr_err("failed to decode server_ident\n");
	return -EINVAL;
}

static int process_ident_missing_features(struct ceph_connection *con,
					  void *p, void *end)
{
	struct ceph_client *client = from_msgr(con->msgr);
	u64 missing_features;

	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
		con->error_msg = "protocol error, unexpected ident_missing_features";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, missing_features, bad);
	pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
	       client->supported_features, missing_features);
	con->error_msg = "missing required protocol features";
	return -ENOENT;

bad:
	pr_err("failed to decode ident_missing_features\n");
	return -EINVAL;
}

static int process_session_reconnect_ok(struct ceph_connection *con,
					void *p, void *end)
{
	u64 seq;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_reconnect_ok";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, seq, bad);

	dout("%s con %p seq %llu\n", __func__, con, seq);
	ceph_con_discard_requeued(con, seq);

	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);
	con->delay = 0;  /* reset backoff memory */

	con->state = CEPH_CON_S_OPEN;
	con->v2.out_state = OUT_S_GET_NEXT;
	return 0;

bad:
	pr_err("failed to decode session_reconnect_ok\n");
	return -EINVAL;
}

static int process_session_retry(struct ceph_connection *con,
				 void *p, void *end)
{
	u64 connect_seq;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_retry";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, connect_seq, bad);

	dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
	WARN_ON(connect_seq <= con->v2.connect_seq);
	con->v2.connect_seq = connect_seq + 1;

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_session_reconnect(con);
	if (ret) {
		pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode session_retry\n");
	return -EINVAL;
}

static int process_session_retry_global(struct ceph_connection *con,
					void *p, void *end)
{
	u64 global_seq;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_retry_global";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, global_seq, bad);

	dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
	WARN_ON(global_seq <= con->v2.global_seq);
	con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_session_reconnect(con);
	if (ret) {
		pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
		return ret;
	}

	return 0;

bad:
	pr_err("failed to decode session_retry_global\n");
	return -EINVAL;
}

static int process_session_reset(struct ceph_connection *con,
				 void *p, void *end)
{
	bool full;
	int ret;

	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		con->error_msg = "protocol error, unexpected session_reset";
		return -EINVAL;
	}

	ceph_decode_8_safe(&p, end, full, bad);
	if (!full) {
		con->error_msg = "protocol error, bad session_reset";
		return -EINVAL;
	}

	pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
		ceph_pr_addr(&con->peer_addr));
	ceph_con_reset_session(con);

	mutex_unlock(&con->mutex);
	if (con->ops->peer_reset)
		con->ops->peer_reset(con);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	free_conn_bufs(con);

	reset_out_kvecs(con);
	ret = prepare_client_ident(con);
	if (ret) {
		pr_err("prepare_client_ident (rst) failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_SESSION_CONNECT;
	return 0;

bad:
	pr_err("failed to decode session_reset\n");
	return -EINVAL;
}

static int process_keepalive2_ack(struct ceph_connection *con,
				  void *p, void *end)
{
	if (con->state != CEPH_CON_S_OPEN) {
		con->error_msg = "protocol error, unexpected keepalive2_ack";
		return -EINVAL;
	}

	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
	ceph_decode_timespec64(&con->last_keepalive_ack, p);

	dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
	     con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);

	return 0;

bad:
	pr_err("failed to decode keepalive2_ack\n");
	return -EINVAL;
}

static int process_ack(struct ceph_connection *con, void *p, void *end)
{
	u64 seq;

	if (con->state != CEPH_CON_S_OPEN) {
		con->error_msg = "protocol error, unexpected ack";
		return -EINVAL;
	}

	ceph_decode_64_safe(&p, end, seq, bad);

	dout("%s con %p seq %llu\n", __func__, con, seq);
	ceph_con_discard_sent(con, seq);
	return 0;

bad:
	pr_err("failed to decode ack\n");
	return -EINVAL;
}

static int process_control(struct ceph_connection *con, void *p, void *end)
{
	int tag = con->v2.in_desc.fd_tag;
	int ret;

	dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));

	switch (tag) {
	case FRAME_TAG_HELLO:
		ret = process_hello(con, p, end);
		break;
	case FRAME_TAG_AUTH_BAD_METHOD:
		ret = process_auth_bad_method(con, p, end);
		break;
	case FRAME_TAG_AUTH_REPLY_MORE:
		ret = process_auth_reply_more(con, p, end);
		break;
	case FRAME_TAG_AUTH_DONE:
		ret = process_auth_done(con, p, end);
		break;
	case FRAME_TAG_AUTH_SIGNATURE:
		ret = process_auth_signature(con, p, end);
		break;
	case FRAME_TAG_SERVER_IDENT:
		ret = process_server_ident(con, p, end);
		break;
	case FRAME_TAG_IDENT_MISSING_FEATURES:
		ret = process_ident_missing_features(con, p, end);
		break;
	case FRAME_TAG_SESSION_RECONNECT_OK:
		ret = process_session_reconnect_ok(con, p, end);
		break;
	case FRAME_TAG_SESSION_RETRY:
		ret = process_session_retry(con, p, end);
		break;
	case FRAME_TAG_SESSION_RETRY_GLOBAL:
		ret = process_session_retry_global(con, p, end);
		break;
	case FRAME_TAG_SESSION_RESET:
		ret = process_session_reset(con, p, end);
		break;
	case FRAME_TAG_KEEPALIVE2_ACK:
		ret = process_keepalive2_ack(con, p, end);
		break;
	case FRAME_TAG_ACK:
		ret = process_ack(con, p, end);
		break;
	default:
		pr_err("bad tag %d\n", tag);
		con->error_msg = "protocol error, bad tag";
		return -EINVAL;
	}

	if (ret) {
		dout("%s con %p error %d\n", __func__, con, ret);
		return ret;
	}

	prepare_read_preamble(con);
	return 0;
}

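/*
 * process_control() only dispatches on fd_tag; each handler validates
 * con->state itself.  On success the read state machine is rearmed
 * with prepare_read_preamble() for the next frame.  FRAME_TAG_MESSAGE
 * never gets here, it is routed through process_message_header() in
 * __handle_control().
 */
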
/*
 * Return:
 *   1 - con->in_msg set, read message
 *   0 - skip message
 */
static int process_message_header(struct ceph_connection *con,
				  void *p, void *end)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	struct ceph_msg_header2 *hdr2 = p;
	struct ceph_msg_header hdr;
	int skip = 0;
	int ret;
	u64 seq;

	/* verify seq# */
	seq = le64_to_cpu(hdr2->seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr),
			seq, con->in_seq + 1);
		return 0;
	}
	if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADE;
	}

	ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));

	fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
		    desc->fd_lens[3], &con->peer_name);
	ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
	if (ret)
		return ret;

	WARN_ON(!con->in_msg ^ skip);
	if (skip)
		return 0;

	WARN_ON(!con->in_msg);
	WARN_ON(con->in_msg->con != con);
	return 1;
}

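/*
 * Sequence number rules enforced by process_message_header(): a seq
 * at or below what was already delivered is skipped (the peer may
 * resend after a reconnect), a gap of more than one is a protocol
 * error.  The piggybacked ack_seq lets us drop our copies of messages
 * the peer has already received.
 */
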
static int process_message(struct ceph_connection *con)
{
	ceph_con_process_message(con);

	/*
	 * We could have been closed by ceph_con_close() because
	 * ceph_con_process_message() temporarily drops con->mutex.
	 */
	if (con->state != CEPH_CON_S_OPEN) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	prepare_read_preamble(con);
	return 0;
}

static int __handle_control(struct ceph_connection *con, void *p)
{
	void *end = p + con->v2.in_desc.fd_lens[0];
	struct ceph_msg *msg;
	int ret;

	if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
		return process_control(con, p, end);

	ret = process_message_header(con, p, end);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		prepare_skip_message(con);
		return 0;
	}

	msg = con->in_msg;  /* set in process_message_header() */
	if (!front_len(msg) && !middle_len(msg)) {
		if (!data_len(msg))
			return process_message(con);

		prepare_read_data(con);
		return 0;
	}

	reset_in_kvecs(con);
	if (front_len(msg)) {
		WARN_ON(front_len(msg) > msg->front_alloc_len);
		add_in_kvec(con, msg->front.iov_base, front_len(msg));
		msg->front.iov_len = front_len(msg);

		if (con_secure(con) && need_padding(front_len(msg)))
			add_in_kvec(con, FRONT_PAD(con->v2.in_buf),
				    padding_len(front_len(msg)));
	} else {
		msg->front.iov_len = 0;
	}
	if (middle_len(msg)) {
		WARN_ON(middle_len(msg) > msg->middle->alloc_len);
		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
		msg->middle->vec.iov_len = middle_len(msg);

		if (con_secure(con) && need_padding(middle_len(msg)))
			add_in_kvec(con, MIDDLE_PAD(con->v2.in_buf),
				    padding_len(middle_len(msg)));
	} else if (msg->middle) {
		msg->middle->vec.iov_len = 0;
	}

	if (data_len(msg)) {
		con->v2.in_state = IN_S_PREPARE_READ_DATA;
	} else {
		add_in_kvec(con, con->v2.in_buf,
			    con_secure(con) ? CEPH_EPILOGUE_SECURE_LEN :
					      CEPH_EPILOGUE_PLAIN_LEN);
		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
	}
	return 0;
}

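/*
 * In secure mode front and middle segments are padded out to the GCM
 * block size, so __handle_control() receives the pad bytes into
 * scratch space in in_buf (FRONT_PAD()/MIDDLE_PAD()) instead of into
 * the message buffers themselves.
 */
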
static int handle_preamble(struct ceph_connection *con)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	int ret;

	if (con_secure(con)) {
		ret = decrypt_preamble(con);
		if (ret) {
			if (ret == -EBADMSG)
				con->error_msg = "integrity error, bad preamble auth tag";
			return ret;
		}
	}

	ret = decode_preamble(con->v2.in_buf, desc);
	if (ret) {
		if (ret == -EBADMSG)
			con->error_msg = "integrity error, bad crc";
		else
			con->error_msg = "protocol error, bad preamble";
		return ret;
	}

	dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
	     con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
	     desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);

	if (!con_secure(con))
		return prepare_read_control(con);

	if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
		return prepare_read_control_remainder(con);

	return __handle_control(con, CTRL_BODY(con->v2.in_buf));
}

static int handle_control(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	void *buf;
	int ret;

	WARN_ON(con_secure(con));

	ret = verify_control_crc(con);
	if (ret) {
		con->error_msg = "integrity error, bad crc";
		return ret;
	}

	if (con->state == CEPH_CON_S_V2_AUTH) {
		buf = alloc_conn_buf(con, ctrl_len);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
		return __handle_control(con, buf);
	}

	return __handle_control(con, con->v2.in_kvecs[0].iov_base);
}

static int handle_control_remainder(struct ceph_connection *con)
{
	int ret;

	WARN_ON(!con_secure(con));

	ret = decrypt_control_remainder(con);
	if (ret) {
		if (ret == -EBADMSG)
			con->error_msg = "integrity error, bad control remainder auth tag";
		return ret;
	}

	return __handle_control(con, con->v2.in_kvecs[0].iov_base -
				     CEPH_PREAMBLE_INLINE_LEN);
}

static int handle_epilogue(struct ceph_connection *con)
{
	u32 front_crc, middle_crc, data_crc;
	int ret;

	if (con_secure(con)) {
		ret = decrypt_message(con);
		if (ret) {
			if (ret == -EBADMSG)
				con->error_msg = "integrity error, bad epilogue auth tag";
			return ret;
		}

		/* just late_status */
		ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
		if (ret) {
			con->error_msg = "protocol error, bad epilogue";
			return ret;
		}
	} else {
		ret = decode_epilogue(con->v2.in_buf, &front_crc,
				      &middle_crc, &data_crc);
		if (ret) {
			con->error_msg = "protocol error, bad epilogue";
			return ret;
		}

		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
					   data_crc);
		if (ret) {
			con->error_msg = "integrity error, bad crc";
			return ret;
		}
	}

	return process_message(con);
}

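/*
 * handle_epilogue() checks message integrity in one of two ways: in
 * secure mode the GCM auth tag already covers everything, so only
 * late_status is decoded from the epilogue; in plain mode the epilogue
 * carries front/middle/data crcs which are verified explicitly.
 */
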
static void finish_skip(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	if (con_secure(con))
		gcm_inc_nonce(&con->v2.in_gcm_nonce);

	__finish_skip(con);
}

static int populate_in_iter(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
	     con->v2.in_state);
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
		ret = process_banner_prefix(con);
	} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
		ret = process_banner_payload(con);
	} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
		    con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
		   con->state == CEPH_CON_S_OPEN) {
		switch (con->v2.in_state) {
		case IN_S_HANDLE_PREAMBLE:
			ret = handle_preamble(con);
			break;
		case IN_S_HANDLE_CONTROL:
			ret = handle_control(con);
			break;
		case IN_S_HANDLE_CONTROL_REMAINDER:
			ret = handle_control_remainder(con);
			break;
		case IN_S_PREPARE_READ_DATA:
			prepare_read_data(con);
			ret = 0;
			break;
		case IN_S_PREPARE_READ_DATA_CONT:
			prepare_read_data_cont(con);
			ret = 0;
			break;
		case IN_S_HANDLE_EPILOGUE:
			ret = handle_epilogue(con);
			break;
		case IN_S_FINISH_SKIP:
			finish_skip(con);
			ret = 0;
			break;
		default:
			WARN(1, "bad in_state %d", con->v2.in_state);
			return -EINVAL;
		}
	} else {
		WARN(1, "bad state %d", con->state);
		return -EINVAL;
	}
	if (ret) {
		dout("%s con %p error %d\n", __func__, con, ret);
		return ret;
	}

	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
		return -ENODATA;
	dout("%s con %p populated %zu\n", __func__, con,
	     iov_iter_count(&con->v2.in_iter));
	return 1;
}

int ceph_con_v2_try_read(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d need %zu\n", __func__, con, con->state,
	     iov_iter_count(&con->v2.in_iter));

	if (con->state == CEPH_CON_S_PREOPEN)
		return 0;

	/*
	 * We should always have something pending here.  If not,
	 * avoid calling populate_in_iter() as if we read something
	 * (ceph_tcp_recv() would immediately return 1).
	 */
	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
		return -ENODATA;

	for (;;) {
		ret = ceph_tcp_recv(con);
		if (ret <= 0)
			return ret;

		ret = populate_in_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "read processing error";
			return ret;
		}
	}
}

static void queue_data(struct ceph_connection *con)
{
	struct bio_vec bv;

	con->v2.out_epil.data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
				  data_len(con->out_msg));

	get_bvec_at(&con->v2.out_cursor, &bv);
	set_out_bvec(con, &bv, true);
	con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
}

static void queue_data_cont(struct ceph_connection *con)
{
	struct bio_vec bv;

	con->v2.out_epil.data_crc = ceph_crc32c_page(
		con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
		con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);

	ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
	if (con->v2.out_cursor.total_resid) {
		get_bvec_at(&con->v2.out_cursor, &bv);
		set_out_bvec(con, &bv, true);
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
		return;
	}

	/*
	 * We've written all data.  Queue epilogue.  Once it's written,
	 * we are done.
	 */
	reset_out_kvecs(con);
	prepare_epilogue_plain(con, false);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}

static void queue_enc_page(struct ceph_connection *con)
{
	struct bio_vec bv;

	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
	     con->v2.out_enc_resid);
	WARN_ON(!con->v2.out_enc_resid);

	bv.bv_page = con->v2.out_enc_pages[con->v2.out_enc_i];
	bv.bv_offset = 0;
	bv.bv_len = min(con->v2.out_enc_resid, (int)PAGE_SIZE);

	set_out_bvec(con, &bv, false);
	con->v2.out_enc_i++;
	con->v2.out_enc_resid -= bv.bv_len;

	if (con->v2.out_enc_resid) {
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
		return;
	}

	/*
	 * We've queued the last piece of ciphertext (ending with
	 * epilogue) + auth tag.  Once it's written, we are done.
	 */
	WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}

static void queue_zeros(struct ceph_connection *con)
{
	dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);

	if (con->v2.out_zero) {
		set_out_bvec_zero(con);
		con->v2.out_zero -= con->v2.out_bvec.bv_len;
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	/*
	 * We've zero-filled everything up to epilogue.  Queue epilogue
	 * with late_status set to ABORTED and crcs adjusted for zeros.
	 * Once it's written, we are done patching up for the revoke.
	 */
	reset_out_kvecs(con);
	prepare_epilogue_plain(con, true);
	con->v2.out_state = OUT_S_FINISH_MESSAGE;
}

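/*
 * queue_zeros() is the write-side half of revoking an outgoing
 * message: whatever remains of the already advertised frame is
 * replaced with zeros (sent from the shared zero page) and the
 * epilogue goes out with late_status ABORTED, telling the peer to
 * discard the message.
 */
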
static void finish_message(struct ceph_connection *con)
{
	dout("%s con %p msg %p\n", __func__, con, con->out_msg);

	/* we end up here both plain and secure modes */
	if (con->v2.out_enc_pages) {
		WARN_ON(!con->v2.out_enc_page_cnt);
		ceph_release_page_vector(con->v2.out_enc_pages,
					 con->v2.out_enc_page_cnt);
		con->v2.out_enc_pages = NULL;
		con->v2.out_enc_page_cnt = 0;
	}
	/* message may have been revoked */
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}

	con->v2.out_state = OUT_S_GET_NEXT;
}

static int populate_out_iter(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
	     con->v2.out_state);
	WARN_ON(iov_iter_count(&con->v2.out_iter));

	if (con->state != CEPH_CON_S_OPEN) {
		WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
			con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
		goto nothing_pending;
	}

	switch (con->v2.out_state) {
	case OUT_S_QUEUE_DATA:
		WARN_ON(!con->out_msg);
		queue_data(con);
		goto populated;
	case OUT_S_QUEUE_DATA_CONT:
		WARN_ON(!con->out_msg);
		queue_data_cont(con);
		goto populated;
	case OUT_S_QUEUE_ENC_PAGE:
		queue_enc_page(con);
		goto populated;
	case OUT_S_QUEUE_ZEROS:
		WARN_ON(con->out_msg);  /* revoked */
		queue_zeros(con);
		goto populated;
	case OUT_S_FINISH_MESSAGE:
		finish_message(con);
		break;
	case OUT_S_GET_NEXT:
		break;
	default:
		WARN(1, "bad out_state %d", con->v2.out_state);
		return -EINVAL;
	}

	WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
	if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
		ret = prepare_keepalive2(con);
		if (ret) {
			pr_err("prepare_keepalive2 failed: %d\n", ret);
			return ret;
		}
	} else if (!list_empty(&con->out_queue)) {
		ceph_con_get_out_msg(con);
		ret = prepare_message(con);
		if (ret) {
			pr_err("prepare_message failed: %d\n", ret);
			return ret;
		}
	} else if (con->in_seq > con->in_seq_acked) {
		ret = prepare_ack(con);
		if (ret) {
			pr_err("prepare_ack failed: %d\n", ret);
			return ret;
		}
	} else {
		goto nothing_pending;
	}

populated:
	if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
		return -ENODATA;
	dout("%s con %p populated %zu\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter));
	return 1;

nothing_pending:
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	dout("%s con %p nothing pending\n", __func__, con);
	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

int ceph_con_v2_try_write(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p state %d have %zu\n", __func__, con, con->state,
	     iov_iter_count(&con->v2.out_iter));

	/* open the socket first? */
	if (con->state == CEPH_CON_S_PREOPEN) {
		WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);

		/*
		 * Always bump global_seq.  Bump connect_seq only if
		 * there is a session (i.e. we are reconnecting and will
		 * send session_reconnect instead of client_ident).
		 */
		con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
		if (con->v2.server_cookie)
			con->v2.connect_seq++;

		ret = prepare_read_banner_prefix(con);
		if (ret) {
			pr_err("prepare_read_banner_prefix failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}

		reset_out_kvecs(con);
		ret = prepare_banner(con);
		if (ret) {
			pr_err("prepare_banner failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}

		ret = ceph_tcp_connect(con);
		if (ret) {
			pr_err("ceph_tcp_connect failed: %d\n", ret);
			con->error_msg = "connect error";
			return ret;
		}
	}

	if (!iov_iter_count(&con->v2.out_iter)) {
		ret = populate_out_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "write processing error";
			return ret;
		}
	}

	tcp_sock_set_cork(con->sock->sk, true);
	for (;;) {
		ret = ceph_tcp_send(con);
		if (ret <= 0)
			break;

		ret = populate_out_iter(con);
		if (ret <= 0) {
			if (ret && ret != -EAGAIN && !con->error_msg)
				con->error_msg = "write processing error";
			break;
		}
	}

	tcp_sock_set_cork(con->sock->sk, false);
	return ret;
}

static u32 crc32c_zeros(u32 crc, int zero_len)
{
	int len;

	while (zero_len) {
		len = min(zero_len, (int)PAGE_SIZE);
		crc = crc32c(crc, page_address(ceph_zero_page), len);
		zero_len -= len;
	}

	return crc;
}

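/*
 * crc32c_zeros() folds a run of zero bytes into a crc without
 * allocating anything by feeding the shared ceph_zero_page to crc32c()
 * one page at a time.  The prepare_zero_*() helpers below use it so
 * that the crcs placed in an aborted epilogue match the zeros actually
 * sent in place of the revoked payload.
 */
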
static void prepare_zero_front(struct ceph_connection *con, int resid)
{
	int sent;

	WARN_ON(!resid || resid > front_len(con->out_msg));
	sent = front_len(con->out_msg) - resid;
	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);

	if (sent) {
		con->v2.out_epil.front_crc =
		    crc32c(-1, con->out_msg->front.iov_base, sent);
		con->v2.out_epil.front_crc =
		    crc32c_zeros(con->v2.out_epil.front_crc, resid);
	} else {
		con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
	}

	con->v2.out_iter.count -= resid;
	out_zero_add(con, resid);
}

static void prepare_zero_middle(struct ceph_connection *con, int resid)
{
	int sent;

	WARN_ON(!resid || resid > middle_len(con->out_msg));
	sent = middle_len(con->out_msg) - resid;
	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);

	if (sent) {
		con->v2.out_epil.middle_crc =
		    crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
		con->v2.out_epil.middle_crc =
		    crc32c_zeros(con->v2.out_epil.middle_crc, resid);
	} else {
		con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
	}

	con->v2.out_iter.count -= resid;
	out_zero_add(con, resid);
}

static void prepare_zero_data(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);
	con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
	out_zero_add(con, data_len(con->out_msg));
}

static void revoke_at_queue_data(struct ceph_connection *con)
{
	int boundary;
	int resid;

	WARN_ON(!data_len(con->out_msg));
	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);

	boundary = front_len(con->out_msg) + middle_len(con->out_msg);
	if (resid > boundary) {
		resid -= boundary;
		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head\n", __func__, con);
		if (front_len(con->out_msg))
			prepare_zero_front(con, front_len(con->out_msg));
		if (middle_len(con->out_msg))
			prepare_zero_middle(con, middle_len(con->out_msg));
		prepare_zero_data(con);
		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	boundary = middle_len(con->out_msg);
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending front\n", __func__, con);
		prepare_zero_front(con, resid);
		if (middle_len(con->out_msg))
			prepare_zero_middle(con, middle_len(con->out_msg));
		prepare_zero_data(con);
		queue_zeros(con);
		return;
	}

	dout("%s con %p was sending middle\n", __func__, con);
	prepare_zero_middle(con, resid);
	prepare_zero_data(con);
	queue_zeros(con);
}

static void revoke_at_queue_data_cont(struct ceph_connection *con)
{
	int sent, resid;  /* current piece of data */

	WARN_ON(!data_len(con->out_msg));
	WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);
	WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
	sent = con->v2.out_bvec.bv_len - resid;
	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);

	if (sent) {
		con->v2.out_epil.data_crc = ceph_crc32c_page(
			con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
			con->v2.out_bvec.bv_offset, sent);
		ceph_msg_data_advance(&con->v2.out_cursor, sent);
	}
	WARN_ON(resid > con->v2.out_cursor.total_resid);
	con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
						 con->v2.out_cursor.total_resid);

	con->v2.out_iter.count -= resid;
	out_zero_add(con, con->v2.out_cursor.total_resid);
	queue_zeros(con);
}

static void revoke_at_finish_message(struct ceph_connection *con)
{
	int boundary;
	int resid;

	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	resid = iov_iter_count(&con->v2.out_iter);

	if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
	    !data_len(con->out_msg)) {
		WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head (empty message) - noop\n",
		     __func__, con);
		return;
	}

	boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
		   CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
		dout("%s con %p was sending head\n", __func__, con);
		if (front_len(con->out_msg))
			prepare_zero_front(con, front_len(con->out_msg));
		if (middle_len(con->out_msg))
			prepare_zero_middle(con, middle_len(con->out_msg));
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
		con->v2.out_state = OUT_S_QUEUE_ZEROS;
		return;
	}

	boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending front\n", __func__, con);
		prepare_zero_front(con, resid);
		if (middle_len(con->out_msg))
			prepare_zero_middle(con, middle_len(con->out_msg));
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		queue_zeros(con);
		return;
	}

	boundary = CEPH_EPILOGUE_PLAIN_LEN;
	if (resid > boundary) {
		resid -= boundary;
		dout("%s con %p was sending middle\n", __func__, con);
		prepare_zero_middle(con, resid);
		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
		queue_zeros(con);
		return;
	}

	WARN_ON(!resid);
	dout("%s con %p was sending epilogue - noop\n", __func__, con);
}

void ceph_con_v2_revoke(struct ceph_connection *con)
{
	WARN_ON(con->v2.out_zero);

	if (con_secure(con)) {
		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
			con->v2.out_state != OUT_S_FINISH_MESSAGE);
		dout("%s con %p secure - noop\n", __func__, con);
		return;
	}

	switch (con->v2.out_state) {
	case OUT_S_QUEUE_DATA:
		revoke_at_queue_data(con);
		break;
	case OUT_S_QUEUE_DATA_CONT:
		revoke_at_queue_data_cont(con);
		break;
	case OUT_S_FINISH_MESSAGE:
		revoke_at_finish_message(con);
		break;
	default:
		WARN(1, "bad out_state %d", con->v2.out_state);
		break;
	}
}

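/*
 * ceph_con_v2_revoke() only needs to patch up plain-mode output: in
 * secure mode the whole ciphertext (epilogue included) was produced
 * up front, so the already encrypted pages simply go out unchanged and
 * the revoked message is dropped in finish_message().
 */
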
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
	int remaining;  /* data + [data padding] + epilogue */
	int resid;

	WARN_ON(!data_len(con->in_msg));
	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid);

	if (con_secure(con))
		remaining = padded_len(data_len(con->in_msg)) +
			    CEPH_EPILOGUE_SECURE_LEN;
	else
		remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;

	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
	     remaining);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, resid + remaining);
	con->v2.in_state = IN_S_FINISH_SKIP;
}

static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
	int recved, resid;  /* current piece of data */
	int remaining;  /* [data padding] + epilogue */

	WARN_ON(!data_len(con->in_msg));
	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
	recved = con->v2.in_bvec.bv_len - resid;
	dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);

	if (recved)
		ceph_msg_data_advance(&con->v2.in_cursor, recved);
	WARN_ON(resid > con->v2.in_cursor.total_resid);

	if (con_secure(con))
		remaining = padding_len(data_len(con->in_msg)) +
			    CEPH_EPILOGUE_SECURE_LEN;
	else
		remaining = CEPH_EPILOGUE_PLAIN_LEN;

	dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
	     con->v2.in_cursor.total_resid, remaining);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
	con->v2.in_state = IN_S_FINISH_SKIP;
}

static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
	int resid;

	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
	resid = iov_iter_count(&con->v2.in_iter);
	WARN_ON(!resid);

	dout("%s con %p resid %d\n", __func__, con, resid);
	con->v2.in_iter.count -= resid;
	set_in_skip(con, resid);
	con->v2.in_state = IN_S_FINISH_SKIP;
}

void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
{
	switch (con->v2.in_state) {
	case IN_S_PREPARE_READ_DATA:
		revoke_at_prepare_read_data(con);
		break;
	case IN_S_PREPARE_READ_DATA_CONT:
		revoke_at_prepare_read_data_cont(con);
		break;
	case IN_S_HANDLE_EPILOGUE:
		revoke_at_handle_epilogue(con);
		break;
	default:
		WARN(1, "bad in_state %d", con->v2.in_state);
		break;
	}
}

bool ceph_con_v2_opened(struct ceph_connection *con)
{
	return con->v2.peer_global_seq;
}

void ceph_con_v2_reset_session(struct ceph_connection *con)
{
	con->v2.client_cookie = 0;
	con->v2.server_cookie = 0;
	con->v2.global_seq = 0;
	con->v2.connect_seq = 0;
	con->v2.peer_global_seq = 0;
}

void ceph_con_v2_reset_protocol(struct ceph_connection *con)
{
	iov_iter_truncate(&con->v2.in_iter, 0);
	iov_iter_truncate(&con->v2.out_iter, 0);
	con->v2.out_zero = 0;

	clear_in_sign_kvecs(con);
	clear_out_sign_kvecs(con);
	free_conn_bufs(con);

	if (con->v2.out_enc_pages) {
		WARN_ON(!con->v2.out_enc_page_cnt);
		ceph_release_page_vector(con->v2.out_enc_pages,
					 con->v2.out_enc_page_cnt);
		con->v2.out_enc_pages = NULL;
		con->v2.out_enc_page_cnt = 0;
	}

	con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;

	if (con->v2.hmac_tfm) {
		crypto_free_shash(con->v2.hmac_tfm);
		con->v2.hmac_tfm = NULL;
	}
	if (con->v2.gcm_req) {
		aead_request_free(con->v2.gcm_req);
		con->v2.gcm_req = NULL;
	}
	if (con->v2.gcm_tfm) {
		crypto_free_aead(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;