/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct request_sock_ops {
	struct kmem_cache	*slab;
	int		(*rtx_syn_ack)(struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(struct sock *sk,
					   struct request_sock *req);
};

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
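
/*
 * Example (hypothetical sketch, not part of this header): a transport
 * protocol typically defines one ops table and points every request_sock
 * it allocates at it; the my_* handler names below are made up (compare
 * tcp_request_sock_ops in net/ipv4/tcp_ipv4.c for the real thing).
 *
 *	static struct request_sock_ops my_request_sock_ops = {
 *		.slab		 = NULL,		// set up at init time
 *		.rtx_syn_ack	 = my_rtx_synack,	// retransmit SYN-ACK
 *		.send_ack	 = my_send_ack,		// ACK a repeated SYN-ACK
 *		.send_reset	 = my_send_reset,	// abort a bad request
 *		.destructor	 = my_reqsk_destructor,	// free per-req state
 *		.syn_ack_timeout = my_synack_timeout,	// account timeouts
 *	};
 */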
/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
	struct request_sock		*dl_next;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd; /* rcv_wnd offered first time */
	unsigned long			expires;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
};
static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}
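
/*
 * Example (hypothetical sketch): the usual request_sock lifecycle.
 * reqsk_alloc() draws from the protocol's slab cache and binds rsk_ops;
 * reqsk_free() runs the protocol destructor before returning the object
 * to the cache, so a half-initialized req should be released with
 * __reqsk_free() instead.
 *
 *	struct request_sock *req = reqsk_alloc(&my_request_sock_ops);
 *
 *	if (req == NULL)
 *		return -ENOBUFS;	// GFP_ATOMIC allocation may fail
 *	// ... fill in req, hash it, send the SYN-ACK ...
 *	reqsk_free(req);		// destructor + kmem_cache_free()
 */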
extern int sysctl_max_syn_backlog;
/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	u8			synflood_warned;
	/* 2 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
};
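
/*
 * Sizing sketch (modelled on reqsk_queue_alloc() in
 * net/core/request_sock.c): the SYN table is clamped to
 * sysctl_max_syn_backlog, rounded up to a power of two, and max_qlen_log
 * is set to the log2 of that size, which is what makes the shift in
 * reqsk_queue_is_full() below a cheap "queue full" test.
 *
 *	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
 *	nr_table_entries = max_t(u32, nr_table_entries, 8);
 *	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 *	for (lopt->max_qlen_log = 3;
 *	     (1 << lopt->max_qlen_log) < nr_table_entries;
 *	     lopt->max_qlen_log++);
 */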
/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above.  There is some implementation
 *	difficulty, though: listen_sock is part of request_sock_queue and
 *	hence is freed when a listener is stopped, but TFO-related fields
 *	may continue to be accessed even after a listener is closed, until
 *	its sk_refcnt drops to 0, implying no more outstanding TFO reqs.
 *	One solution is to keep listen_opt around until sk_refcnt drops to
 *	0, but there is some other complexity that needs to be resolved
 *	first.  E.g., a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};
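
/*
 * Example (hypothetical helper): per the comments above, "TFO active
 * right now" is a two-part test, because the fastopen_queue may outlive
 * an administrative disable that zeroed max_qlen:
 *
 *	static inline bool my_tfo_active(const struct fastopen_queue *fq)
 *	{
 *		return fq != NULL && fq->max_qlen != 0;
 *	}
 */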
/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only to avoid the proc interface having to
 * grab the main sock lock while browsing the listening hash (otherwise
 * it's deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head. Readers that already hold the master sock lock
 * don't need to take this lock in read mode, because writes to
 * rskq_accept_head are always protected by the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
					     * enabled on this listener. Check
					     * max_qlen != 0 in fastopen_queue
					     * to determine if TFO is enabled
					     * right at this moment.
					     */
};
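
/*
 * Lock-usage sketch (assumed reader, mirroring the rules above): a
 * walker that does not hold the listener's sock lock, such as the
 * listening_get_next() seq_file op, takes syn_wait_lock in read mode
 * around the SYN-table traversal; writers like reqsk_queue_hash_req()
 * below take it in write mode.
 *
 *	read_lock_bh(&queue->syn_wait_lock);
 *	for (req = lopt->syn_table[bucket]; req != NULL; req = req->dl_next)
 *		;	// ... inspect req ...
 *	read_unlock_bh(&queue->syn_wait_lock);
 */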
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries);

void __reqsk_queue_destroy(struct request_sock_queue *queue);
void reqsk_queue_destroy(struct request_sock_queue *queue);
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);
static inline struct request_sock *
	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}
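
/*
 * Usage sketch: yanking detaches the whole established-children FIFO in
 * one shot; the caller then owns the list and walks dl_next itself, as
 * listener teardown does (cf. inet_csk_listen_stop()):
 *
 *	acc_req = reqsk_queue_yank_acceptq(queue);
 *	while ((req = acc_req) != NULL) {
 *		acc_req = req->dl_next;
 *		// ... dispose of req->sk, then reqsk_free(req) ...
 *	}
 */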
static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}
static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}
static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}
static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	WARN_ON(req == NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}
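
/*
 * Example (hypothetical sketch): the accept() path is the mirror image
 * of reqsk_queue_add(); cf. inet_csk_accept(), which also handles the
 * TFO case where the req must outlive this dequeue.
 *
 *	req = reqsk_queue_remove(queue);
 *	newsk = req->sk;
 *	sk_acceptq_removed(parent);
 *	__reqsk_free(req);
 */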
static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->num_timeout == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}
static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}
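
/*
 * Bookkeeping sketch (assumed caller): a request is hashed and then
 * counted; qlen_young tracks entries whose SYN-ACK has never been
 * retransmitted, which the prune logic treats more leniently.  A zero
 * return from reqsk_queue_added() means this was the first pending
 * request, so the caller arms the SYN-ACK retransmit timer.
 *
 *	reqsk_queue_hash_req(queue, hash, req, timeout);
 *	if (reqsk_queue_added(queue) == 0)
 *		;	// ... arm the retransmit timer ...
 */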
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}
static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
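
/*
 * Worked example: with max_qlen_log == 8 the table admits up to 256
 * pending requests; qlen >> 8 is 0 for qlen in 0..255 and becomes
 * non-zero exactly at qlen == 256, so the shift doubles as a boolean
 * "queue is full" without a comparison.
 */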
static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}
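
/*
 * End-to-end sketch (hypothetical, loosely modelled on the TCP SYN
 * path): a protocol's connection-request handler glues the pieces above
 * together; my_request_sock_ops and my_send_synack are made-up names.
 *
 *	if (reqsk_queue_is_full(queue))
 *		goto drop;			// SYN table full
 *	req = reqsk_alloc(&my_request_sock_ops);
 *	if (req == NULL)
 *		goto drop;
 *	// ... parse options, initialize req ...
 *	if (my_send_synack(sk, req))
 *		goto drop_and_free;
 *	reqsk_queue_hash_req(queue, hash, req, timeout);
 *	reqsk_queue_added(queue);
 */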
#endif /* _REQUEST_SOCK_H */