/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  An implementation of the DCCP protocol
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 */

#include <linux/dccp.h>
#include <linux/ktime.h>
/*
 *	DCCP - specific warning and debugging macros.
 */
#define DCCP_WARN(fmt, ...)						\
	net_warn_ratelimited("%s: " fmt, __func__, ##__VA_ARGS__)
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
				    __FILE__, __LINE__, __func__)
#define DCCP_BUG(a...)       do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
#define DCCP_BUG_ON(cond)    do { if (unlikely((cond) != 0))		   \
				     DCCP_BUG("\"%s\" holds (exception!)", \
					      __stringify(cond));	   \
			     } while (0)

#define DCCP_PRINTK(enable, fmt, args...)	do { if (enable)	     \
							printk(fmt, ##args); \
						} while(0)
#define DCCP_PR_DEBUG(enable, fmt, a...)	DCCP_PRINTK(enable, KERN_DEBUG \
						  "%s: " fmt, __func__, ##a)
#ifdef CONFIG_IP_DCCP_DEBUG
extern bool dccp_debug;
#define dccp_pr_debug(format, a...)	  DCCP_PR_DEBUG(dccp_debug, format, ##a)
#define dccp_pr_debug_cat(format, a...)	  DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...)		  dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
#define dccp_pr_debug(format, a...)
#define dccp_pr_debug_cat(format, a...)
#define dccp_debug(format, a...)
#endif
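/*
 * Illustrative usage sketch (not part of the original header), with a
 * hypothetical caller: dccp_pr_debug() compiles away unless
 * CONFIG_IP_DCCP_DEBUG is set, while DCCP_BUG_ON() always logs and dumps a
 * stack trace when the (unexpected) condition holds.
 *
 *	static void dccp_example_check(struct sock *sk)	// hypothetical
 *	{
 *		dccp_pr_debug("sk=%p state=%d\n", sk, sk->sk_state);
 *		DCCP_BUG_ON(sk->sk_state == DCCP_CLOSED);
 *	}
 */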
extern struct inet_hashinfo dccp_hashinfo;

extern struct percpu_counter dccp_orphan_count;

void dccp_time_wait(struct sock *sk, int state, int timeo);
/*
 *  Set safe upper bounds for header and option length. Since Data Offset is 8
 *  bits (RFC 4340, sec. 5.1), the total header length can never be more than
 *  4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1):
 *    - DCCP-Response with ACK Subheader and 4 bytes of Service code      OR
 *    - DCCP-Reset    with ACK Subheader and 4 bytes of Reset Code fields
 *  Hence a safe upper bound for the maximum option length is 1020-28 = 992
 */
#define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(uint32_t))
#define DCCP_MAX_PACKET_HDR	 28
#define DCCP_MAX_OPT_LEN	 (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR)
#define MAX_DCCP_HEADER		 (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER)

/* Upper bound for initial feature-negotiation overhead (padded to 32 bits) */
#define DCCP_FEATNEG_OVERHEAD	 (32 * sizeof(uint32_t))
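/*
 * Worked example (illustrative, not from the original header): the 8-bit Data
 * Offset counts 32-bit words, so the header spans at most 255 * 4 = 1020
 * bytes; subtracting the 28-byte worst-case fixed header leaves 992 bytes for
 * options.  A compile-time check along these lines would document the
 * invariant:
 *
 *	BUILD_BUG_ON(DCCP_MAX_OPT_LEN != 1020 - DCCP_MAX_PACKET_HDR);
 */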
#define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
				     * state, about 60 seconds */

/* RFC 1122, 4.2.3.1 initial RTO value */
#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))

/*
 * The maximum back-off value for retransmissions. This is needed for
 *  - retransmitting client-Requests (sec. 8.1.1),
 *  - retransmitting Close/CloseReq when closing (sec. 8.3),
 *  - feature-negotiation retransmission (sec. 6.6.3),
 *  - Acks in client-PARTOPEN state (sec. 8.1.5).
 */
#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
/*
 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
 */
#define DCCP_SANE_RTT_MIN	100
#define DCCP_FALLBACK_RTT	(USEC_PER_SEC / 5)
#define DCCP_SANE_RTT_MAX	(3 * USEC_PER_SEC)
/* sysctl variables for DCCP */
extern int  sysctl_dccp_request_retries;
extern int  sysctl_dccp_retries1;
extern int  sysctl_dccp_retries2;
extern int  sysctl_dccp_tx_qlen;
extern int  sysctl_dccp_sync_ratelimit;
/*
 *	48-bit sequence number arithmetic (signed and unsigned)
 */
#define INT48_MIN	  0x800000000000LL		/* 2^47	    */
#define UINT48_MAX	  0xFFFFFFFFFFFFLL		/* 2^48 - 1 */
#define COMPLEMENT48(x)	  (0x1000000000000LL - (x))	/* 2^48 - x */
#define TO_SIGNED48(x)	  (((x) < INT48_MIN) ? (x) : -COMPLEMENT48((x)))
#define TO_UNSIGNED48(x)  (((x) >= 0) ?        (x) :  COMPLEMENT48(-(x)))
#define ADD48(a, b)	  (((a) + (b)) & UINT48_MAX)
#define SUB48(a, b)	  ADD48((a), COMPLEMENT48(b))
static inline void dccp_inc_seqno(u64 *seqno)
{
	*seqno = ADD48(*seqno, 1);
}
/* signed mod-2^48 distance: pos. if seqno1 < seqno2, neg. if seqno1 > seqno2 */
static inline s64 dccp_delta_seqno(const u64 seqno1, const u64 seqno2)
{
	u64 delta = SUB48(seqno2, seqno1);

	return TO_SIGNED48(delta);
}
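/*
 * Illustrative sketch (not part of the original header): behaviour of the
 * mod-2^48 helpers at the wrap-around point.
 *
 *	u64 seq = UINT48_MAX;			// 2^48 - 1, largest seqno
 *
 *	dccp_inc_seqno(&seq);			// wraps modulo 2^48: seq == 0
 *
 * The signed distance still comes out right across the wrap:
 *	dccp_delta_seqno(UINT48_MAX, 0) ==  1
 *	dccp_delta_seqno(0, UINT48_MAX) == -1
 */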
/* is seq1 < seq2 ? */
static inline int before48(const u64 seq1, const u64 seq2)
{
	return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
}
/* is seq1 > seq2 ? */
#define after48(seq1, seq2)	before48(seq2, seq1)
/* is seq2 <= seq1 <= seq3 ? */
static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
{
	return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}
static inline u64 max48(const u64 seq1, const u64 seq2)
{
	return after48(seq1, seq2) ? seq1 : seq2;
}
/**
 * dccp_loss_count - Approximate the number of lost data packets in a burst loss
 * @s1:  last known sequence number before the loss ('hole')
 * @s2:  first sequence number seen after the 'hole'
 * @ndp: NDP count on packet with sequence number @s2
 */
static inline u64 dccp_loss_count(const u64 s1, const u64 s2, const u64 ndp)
{
	s64 delta = dccp_delta_seqno(s1, s2);

	WARN_ON(delta < 0);
	delta -= ndp + 1;

	return delta > 0 ? delta : 0;
}
/**
 * dccp_loss_free - Evaluate condition for data loss from RFC 4340, 7.7.1
 */
static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
{
	return dccp_loss_count(s1, s2, ndp) == 0;
}
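/*
 * Worked example (illustrative, not from the original header): s1 = 100 is
 * the last packet received before a hole and s2 = 105 the first one after it.
 * If s2 carries an NDP count of 2 (two non-data packets in between), then
 * dccp_delta_seqno(100, 105) == 5 and 5 - (2 + 1) = 2 data packets are
 * presumed lost:
 *
 *	dccp_loss_count(100, 105, 2) == 2
 *	dccp_loss_free(100, 105, 4) == true	// hole fully explained by NDPs
 */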
enum {
	DCCP_MIB_NUM = 0,
	DCCP_MIB_ACTIVEOPENS,			/* ActiveOpens */
	DCCP_MIB_ESTABRESETS,			/* EstabResets */
	DCCP_MIB_CURRESTAB,			/* CurrEstab */
	DCCP_MIB_OUTSEGS,			/* OutSegs */
	DCCP_MIB_OUTRSTS,
	DCCP_MIB_ABORTONTIMEOUT,
	DCCP_MIB_TIMEOUTS,
	DCCP_MIB_ABORTFAILED,
	DCCP_MIB_PASSIVEOPENS,
	DCCP_MIB_ATTEMPTFAILS,
	DCCP_MIB_OUTDATAGRAMS,
	DCCP_MIB_INERRS,
	DCCP_MIB_OPTMANDATORYERROR,
	DCCP_MIB_INVALIDOPT,
	__DCCP_MIB_MAX
};

#define DCCP_MIB_MAX	__DCCP_MIB_MAX
struct dccp_mib {
	unsigned long	mibs[DCCP_MIB_MAX];
};
DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
#define DCCP_INC_STATS(field)	SNMP_INC_STATS(dccp_statistics, field)
#define __DCCP_INC_STATS(field)	__SNMP_INC_STATS(dccp_statistics, field)
#define DCCP_DEC_STATS(field)	SNMP_DEC_STATS(dccp_statistics, field)
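/*
 * Illustrative usage sketch (not part of the original header): the MIB
 * counters above are bumped through the SNMP helpers, e.g. when a connection
 * reaches the OPEN state and again when it is torn down:
 *
 *	DCCP_INC_STATS(DCCP_MIB_CURRESTAB);	// connection established
 *	...
 *	DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);	// connection closed
 */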
/*
 *	Checksumming routines
 */
static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	if (dh->dccph_cscov == 0)
		return skb->len;
	return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32);
}
static inline void dccp_csum_outgoing(struct sk_buff *skb)
{
	unsigned int cov = dccp_csum_coverage(skb);

	if (cov >= skb->len)
		dccp_hdr(skb)->dccph_cscov = 0;

	skb->csum = skb_checksum(skb, 0, (cov > skb->len) ? skb->len : cov, 0);
}
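/*
 * Worked example (illustrative, not from the original header): CsCov == 0
 * means the checksum covers the whole packet, so dccp_csum_coverage() simply
 * returns skb->len.  For CsCov = 3 with a Data Offset of 7 words, coverage is
 * (7 + 3 - 1) * 4 = 36 bytes, i.e. the 28-byte header plus the first 8 bytes
 * of application data (cf. RFC 4340, sec. 9.2).
 */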
void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);

int dccp_retransmit_skb(struct sock *sk);

void dccp_send_ack(struct sock *sk);
void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
			 struct request_sock *rsk);

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type);
/*
 * TX Packet Dequeueing Interface
 */
void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
bool dccp_qpolicy_full(struct sock *sk);
void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
struct sk_buff *dccp_qpolicy_top(struct sock *sk);
struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
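/*
 * Illustrative sketch (not part of the original header) of how the queueing
 * policy hooks above fit together; the error handling shown is hypothetical:
 *
 *	// enqueue side (sendmsg path):
 *	if (dccp_qpolicy_full(sk))
 *		return -EAGAIN;			// or block, depending on policy
 *	dccp_qpolicy_push(sk, skb);
 *
 *	// dequeue side (write_xmit path):
 *	while ((skb = dccp_qpolicy_top(sk)) != NULL) {
 *		...				// CCID decides whether to send now
 *		skb = dccp_qpolicy_pop(sk);	// commit: remove skb from queue
 *	}
 */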
/*
 * TX Packet Output and TX Timers
 */
void dccp_write_xmit(struct sock *sk);
void dccp_write_space(struct sock *sk);
void dccp_flush_write_queue(struct sock *sk, long *time_budget);

void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);

const char *dccp_packet_name(const int type);

void dccp_set_state(struct sock *sk, const int state);
void dccp_done(struct sock *sk);

int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
		    struct sk_buff const *skb);
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

struct sock *dccp_create_openreq_child(const struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb);

int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst,
				       struct request_sock *req_unhash,
				       bool *own_req);
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req);
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb);
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned int len);
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned int len);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk);

void dccp_close(struct sock *sk, long timeout);
struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req);
int dccp_connect(struct sock *sk);
int dccp_disconnect(struct sock *sk, int flags);
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen);
int dccp_setsockopt(struct sock *sk, int level, int optname,
		    sockptr_t optval, unsigned int optlen);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		 int flags, int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
__poll_t dccp_poll(struct file *file, struct socket *sock,
		   poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void dccp_req_err(struct sock *sk, u64 seq);
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
void dccp_send_close(struct sock *sk, const int active);
int dccp_invalid_packet(struct sk_buff *skb);
u32 dccp_sample_rtt(struct sock *sk, long delta);
static inline bool dccp_bad_service_code(const struct sock *sk,
					 const __be32 service)
{
	const struct dccp_sock *dp = dccp_sk(sk);

	if (dp->dccps_service == service)
		return false;
	return !dccp_list_has_service(dp->dccps_service_list, service);
}
/**
 * dccp_skb_cb  -  DCCP per-packet control information
 * @dccpd_type: one of %dccp_pkt_type (or unknown)
 * @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1
 * @dccpd_reset_code: one of %dccp_reset_codes
 * @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code)
 * @dccpd_opt_len: total length of all options (5.8) in the packet
 * @dccpd_seq: sequence number
 * @dccpd_ack_seq: acknowledgment number subheader field value
 *
 * This is used for transmission as well as for reception.
 */
struct dccp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u8  dccpd_type:4;
	__u8  dccpd_ccval:4;
	__u8  dccpd_reset_code,
	      dccpd_reset_data[3];
	__u16 dccpd_opt_len;
	__u64 dccpd_seq;
	__u64 dccpd_ack_seq;
};

#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))
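/*
 * Illustrative sketch (not part of the original header): per-packet control
 * data lives in skb->cb and is accessed through DCCP_SKB_CB(), e.g.
 *
 *	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 *
 *	dcb->dccpd_type = DCCP_PKT_ACK;		// set on the TX path
 *	if (dcb->dccpd_type == DCCP_PKT_RESET)	// inspected on the RX path
 *		...
 */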
/* RFC 4340, sec. 7.7 */
static inline int dccp_non_data_packet(const struct sk_buff *skb)
{
	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

	return type == DCCP_PKT_ACK	 ||
	       type == DCCP_PKT_CLOSE	 ||
	       type == DCCP_PKT_CLOSEREQ ||
	       type == DCCP_PKT_RESET	 ||
	       type == DCCP_PKT_SYNC	 ||
	       type == DCCP_PKT_SYNCACK;
}
/* RFC 4340, sec. 7.7 */
static inline int dccp_data_packet(const struct sk_buff *skb)
{
	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

	return type == DCCP_PKT_DATA	 ||
	       type == DCCP_PKT_DATAACK	 ||
	       type == DCCP_PKT_REQUEST	 ||
	       type == DCCP_PKT_RESPONSE;
}
static inline int dccp_packet_without_ack(const struct sk_buff *skb)
{
	const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

	return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
}

#define DCCP_PKT_WITHOUT_ACK_SEQ (UINT48_MAX << 2)
static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
{
	struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
							   sizeof(*dh));
	dh->dccph_seq2	   = 0;
	dh->dccph_seq	   = htons((gss >> 32) & 0xfffff);
	dhx->dccph_seq_low = htonl(gss & 0xffffffff);
}
static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
				    const u64 gsr)
{
	dhack->dccph_reserved1	 = 0;
	dhack->dccph_ack_nr_high = htons(gsr >> 32);
	dhack->dccph_ack_nr_low	 = htonl(gsr & 0xffffffff);
}
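/*
 * Illustrative note (not part of the original header): with extended (X = 1)
 * sequence numbers the 48-bit value is split across the generic header and
 * the extension, e.g. for gss = 0x123456789abc:
 *
 *	dccph_seq     = htons(0x1234);		// high 16 bits
 *	dccph_seq_low = htonl(0x56789abc);	// low  32 bits
 */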
static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (after48(seq, dp->dccps_gsr))
		dp->dccps_gsr = seq;
	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
	/*
	 * Adjust SWL so that it is not below ISR. In contrast to RFC 4340,
	 * 7.5.1 we perform this check beyond the initial handshake: W/W' are
	 * always > 32, so for the first W/W' packets in the lifetime of a
	 * connection we always have to adjust SWL.
	 * A second reason why we are doing this is that the window depends on
	 * the feature-remote value of Sequence Window: nothing stops the peer
	 * from updating this value while we are busy adjusting SWL for the
	 * first W packets (we would have to count from scratch again then).
	 * Therefore it is safer to always make sure that the Sequence Window
	 * is not artificially extended by a peer who grows SWL downwards by
	 * continually updating the feature-remote Sequence-Window.
	 * If sequence numbers wrap it is bad luck. But that will take a while
	 * (48 bit), and this measure prevents Sequence-number attacks.
	 */
	if (before48(dp->dccps_swl, dp->dccps_isr))
		dp->dccps_swl = dp->dccps_isr;
	dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4);
}
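/*
 * Worked example (illustrative, not from the original header): with a remote
 * Sequence Window of W' = 100 and GSR = 1000, the valid range for incoming
 * sequence numbers becomes roughly [GSR + 1 - W'/4, GSR + 3*W'/4]:
 *
 *	SWL = SUB48(ADD48(1000, 1), 100 / 4) ==  976
 *	SWH = ADD48(1000, (3 * 100) / 4)     == 1075
 *
 * subject to the SWL >= ISR adjustment made above.
 */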
static inline void dccp_update_gss(struct sock *sk, u64 seq)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_gss = seq;
	/* Ack validity window depends on local Sequence Window value (7.5.1) */
	dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win);
	/* Adjust AWL so that it is not below ISS - see comment above for SWL */
	if (before48(dp->dccps_awl, dp->dccps_iss))
		dp->dccps_awl = dp->dccps_iss;
	dp->dccps_awh = dp->dccps_gss;
}
static inline int dccp_ackvec_pending(const struct sock *sk)
{
	return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
	       !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
}

static inline int dccp_ack_pending(const struct sock *sk)
{
	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}
int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
int dccp_feat_finalise_settings(struct dccp_sock *dp);
int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
int dccp_feat_insert_opts(struct dccp_sock *, struct dccp_request_sock *,
			  struct sk_buff *skb);
int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
void dccp_feat_list_purge(struct list_head *fn_list);
int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
u32 dccp_timestamp(void);
void dccp_timestamping_init(void);
int dccp_insert_option(struct sk_buff *skb, unsigned char option,
		       const void *value, unsigned char len);
#ifdef CONFIG_SYSCTL
int dccp_sysctl_init(void);
void dccp_sysctl_exit(void);
#else
static inline int dccp_sysctl_init(void)
{
	return 0;
}

static inline void dccp_sysctl_exit(void)
{
}
#endif