/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>
struct request_sock_ops {
	struct kmem_cache	*slab;
	int			(*rtx_syn_ack)(struct sock *sk,
					       struct request_sock *req);
	void			(*send_ack)(struct sock *sk, struct sk_buff *skb,
					    struct request_sock *req);
	void			(*send_reset)(struct sock *sk,
					      struct sk_buff *skb);
	void			(*destructor)(struct request_sock *req);
	void			(*syn_ack_timeout)(struct sock *sk,
						   struct request_sock *req);
};
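
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * a protocol that uses this infrastructure supplies its own ops table wired
 * to protocol-specific callbacks.  Every "example_*" name below is a
 * hypothetical placeholder, not a real kernel symbol.
 *
 *	static struct request_sock_ops example_request_sock_ops = {
 *		.slab		 = NULL,	(set up when the cache is created)
 *		.rtx_syn_ack	 = example_rtx_syn_ack,
 *		.send_ack	 = example_send_ack,
 *		.send_reset	 = example_send_reset,
 *		.destructor	 = example_reqsk_destructor,
 *		.syn_ack_timeout = example_syn_ack_timeout,
 *	};
 */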

extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct request_sock		*dl_next;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	unsigned long			expires;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
};

static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req != NULL)
		req->rsk_ops = ops;

	return req;
}

static inline void __reqsk_free(struct request_sock *req)
{
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	__reqsk_free(req);
}
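
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the usual lifetime of a request_sock, assuming a protocol-private ops
 * table ("example_request_sock_ops" is a hypothetical placeholder whose
 * slab cache has already been created):
 *
 *	struct request_sock *req = reqsk_alloc(&example_request_sock_ops);
 *	if (req == NULL)
 *		return -ENOBUFS;
 *	(fill in protocol-specific state, hash the request, etc.)
 *	reqsk_free(req);	- runs ops->destructor(), then __reqsk_free()
 */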

extern int sysctl_max_syn_backlog;

/** struct listen_sock - listen state
 *
 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
 */
struct listen_sock {
	u8			max_qlen_log;
	u8			synflood_warned;
	/* 2 bytes hole, try to use */
	int			qlen;
	int			qlen_young;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
};
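
/*
 * Editorial note (not part of the original header): syn_table[] is a
 * flexible array of SYN hash buckets.  reqsk_queue_alloc() is expected to
 * size it to nr_table_entries entries and to pick max_qlen_log so that the
 * SYN backlog limit is 2^max_qlen_log (see reqsk_queue_is_full() below).
 */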

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @syn_wait_lock - serializer
 *
 * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
 * lock sock while browsing the listening hash (otherwise it's deadlock prone).
 *
 * This lock is acquired in read mode only from listening_get_next() seq_file
 * op and it's acquired in write mode _only_ from code that is actively
 * changing rskq_accept_head. All readers that are holding the master sock lock
 * don't need to grab this lock in read mode too, as rskq_accept_head writes
 * are always protected from the main sock lock.
 */
struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
					     * enabled on this listener. Check
					     * max_qlen != 0 in fastopen_queue
					     * to determine if TFO is enabled
					     * right at this moment.
					     */
};
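
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * per the fastopenq comment above, "TFO is enabled right now" means both
 * that the pointer is set and that max_qlen is non-zero.  The helper name
 * is hypothetical.
 */
static inline bool example_fastopen_enabled(const struct request_sock_queue *queue)
{
	return queue->fastopenq != NULL && queue->fastopenq->max_qlen != 0;
}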

extern int reqsk_queue_alloc(struct request_sock_queue *queue,
			     unsigned int nr_table_entries);

extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_fastopen_remove(struct sock *sk,
				  struct request_sock *req, bool reset);

static inline struct request_sock *
	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	queue->rskq_accept_head = NULL;
	return req;
}

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
				      struct request_sock *req,
				      struct request_sock **prev_req)
{
	write_lock(&queue->syn_wait_lock);
	*prev_req = req->dl_next;
	write_unlock(&queue->syn_wait_lock);
}

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
	struct request_sock *req = queue->rskq_accept_head;

	WARN_ON(req == NULL);

	queue->rskq_accept_head = req->dl_next;
	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_tail = NULL;

	return req;
}

static inline int reqsk_queue_removed(struct request_sock_queue *queue,
				      struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;

	if (req->num_timeout == 0)
		--lopt->qlen_young;

	return --lopt->qlen;
}

static inline int reqsk_queue_added(struct request_sock_queue *queue)
{
	struct listen_sock *lopt = queue->listen_opt;
	const int prev_qlen = lopt->qlen;

	lopt->qlen_young++;
	lopt->qlen++;
	return prev_qlen;
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen_young;
}

static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
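
/*
 * Editorial note (not part of the original header): the right shift turns
 * this into a power-of-two threshold test.  For example, with
 * max_qlen_log == 8 the return value becomes non-zero once qlen reaches
 * 256 (2^8), i.e. the SYN queue is treated as full.
 */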

static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
					u32 hash, struct request_sock *req,
					unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->expires = jiffies + timeout;
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[hash];

	write_lock(&queue->syn_wait_lock);
	lopt->syn_table[hash] = req;
	write_unlock(&queue->syn_wait_lock);
}
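
/*
 * Editorial note (not part of the original header): the caller is assumed
 * to pass a hash value already reduced to [0, nr_table_entries), since it
 * is used directly to index lopt->syn_table[].  The new request is pushed
 * onto the front of that bucket's singly linked list (via dl_next) with
 * syn_wait_lock held for writing.
 */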

#endif /* _REQUEST_SOCK_H */