/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>
struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
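/*
 * Example (illustrative, not part of this header): each protocol fills in
 * one of these tables and creates the slab during registration. The IPv4
 * TCP table in net/ipv4/tcp_ipv4.c looks roughly like this:
 *
 *	struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 *		.family		= PF_INET,
 *		.obj_size	= sizeof(struct tcp_request_sock),
 *		.rtx_syn_ack	= tcp_rtx_synack,
 *		.send_ack	= tcp_v4_reqsk_send_ack,
 *		.destructor	= tcp_v4_reqsk_destructor,
 *		.send_reset	= tcp_v4_send_reset,
 *		.syn_ack_timeout = tcp_syn_ack_timeout,
 *	};
 */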
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
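/*
 * Sketch of the implementation in net/ipv4/inet_connection_sock.c
 * (simplified): it retransmits the SYN-ACK through the ops table and
 * counts successful retransmits on the request itself.
 *
 *	int inet_rtx_syn_ack(const struct sock *parent,
 *			     struct request_sock *req)
 *	{
 *		int err = req->rsk_ops->rtx_syn_ack(parent, req);
 *
 *		if (!err)
 *			req->num_retrans++;
 *		return err;
 *	}
 */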
/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
	struct request_sock		*dl_next;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */

	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk; /* child socket, set when the request
					      * moves onto the accept queue */
	u32				*saved_syn; /* copy of the original SYN,
						     * kfree()d in __reqsk_free() */
};
static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}
static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
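/*
 * Both casts above are only valid because struct request_sock and
 * struct sock begin with the same struct sock_common member
 * (__req_common here, __sk_common in struct sock), which lets a
 * request_sock sit in the same hash tables as full sockets.
 */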
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->num_timeout = 0;
	req->num_retrans = 0;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
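/*
 * Note: rsk_refcnt deliberately starts at 0; the caller publishes the
 * request with a later refcount_set() once it is fully initialized (for
 * TCP, reqsk_queue_hash_req() takes the hash-table and timer references
 * before the request becomes visible to other CPUs).
 */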
static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}
static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}
static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
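/*
 * Free-path summary: reqsk_put() is the normal way to drop a reference,
 * reqsk_free() additionally asserts that no references remain, and
 * __reqsk_free() skips the check for requests that were never published.
 */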
/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above. But there is an implementation
 *	difficulty: listen_sock is part of request_sock_queue and hence is
 *	freed when a listener is stopped, while TFO-related fields may
 *	continue to be accessed even after a listener is closed, until its
 *	sk_refcnt drops to 0, implying no more outstanding TFO reqs. One
 *	solution is to keep listen_opt around until sk_refcnt drops to 0,
 *	but there is some other complexity that needs to be resolved, e.g.
 *	a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};
void reqsk_queue_alloc(struct request_sock_queue *queue);
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);
static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
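/*
 * Typical consumer (sketch, simplified from inet_csk_accept() in
 * net/ipv4/inet_connection_sock.c): the accept path pops an established
 * child off this FIFO, then drops the request.
 *
 *	req = reqsk_queue_remove(queue, sk);
 *	newsk = req->sk;
 *	...
 *	reqsk_put(req);
 */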
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}
static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}
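/*
 * Accounting note: qlen counts all pending (TCP_SYN_RECV) requests,
 * while young counts only those that have not yet suffered a SYN-ACK
 * timeout (num_timeout == 0). reqsk_timer_handler() compares the two
 * to decide which requests to drop when the queue fills up.
 */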
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
#endif /* _REQUEST_SOCK_H */