/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};
unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
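/*
 * Read the ephemeral port range under the seqlock, retrying if a
 * writer updated sysctl_local_ports while we were reading, so callers
 * get a consistent (low, high) pair without taking a blocking lock.
 */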
void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
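/*
 * Decide whether sk may bind to the same port as the sockets already
 * hashed in tb->owners.  Address reuse is allowed only when both
 * sockets set SO_REUSEADDR and the existing one is not listening;
 * with relax == false the check is stricter and also flags reusable
 * sockets bound to the same address as potential conflicts.
 */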
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
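/*
 * Port selection below starts from a random rover within the local
 * port range and walks the bind hash until it finds a free or
 * shareable bucket; buckets marked fastreuse may be grabbed early
 * once enough sockets are bound that a full scan would be costly.
 */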
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
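/*
 * Reset the pending-timer bookkeeping and stop the retransmit,
 * delayed-ACK and keepalive timers for this socket.
 */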
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
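/*
 * Build the route used to send the SYN-ACK for a pending connection
 * request.  The flow is derived from the listener's options and the
 * addresses recorded in the request; IP strict source routing is
 * honoured by refusing routes that would bypass the required gateway.
 */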
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
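/*
 * Same routing decision as above, but for the child socket created
 * from an accepted request: the flow is stored in the child's cork
 * so later transmits can reuse it.  Runs under rcu_read_lock()
 * because the child's IP options are RCU-protected.
 */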
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
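/*
 * With IPv6 compiled in, the SYN table is shared with IPv6 requests,
 * so entries must be checked against the request's address family;
 * otherwise every entry is IPv4 and the test compiles away.
 */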
#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
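/*
 * Hash a new request into the listener's SYN table and bump the
 * queue counters; the request is then retransmitted or expired by
 * the prune logic below until the handshake completes.
 */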
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
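/*
 * Retransmit the SYN-ACK for a pending request; on success the
 * per-request retransmission counter is incremented.
 */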
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
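/*
 * Walk a slice of the SYN table, retransmitting SYN-ACKs that are
 * due and dropping requests that have exceeded their retry budget.
 * Called periodically, typically every 'interval' jiffies from the
 * listener's SYN-ACK timer, while the queue is non-empty.
 */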
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
/* This function allows us to force the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
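/*
 * Report the remote endpoint of a connected socket in *uaddr,
 * in AF_INET sockaddr form.
 */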
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
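/*
 * Compat entry points: route {get,set}sockopt calls from 32-bit tasks
 * to the address family's compat handler when one is provided,
 * otherwise fall back to the native handler.
 */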
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
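/*
 * Rebuild a cached route for a connected socket from its saved flow,
 * honouring any source-routing option, and re-attach it to the socket.
 */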
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}
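/*
 * Propagate a new path MTU to the socket's route.  If the cached
 * route was invalidated, rebuild it first; the (possibly refreshed)
 * dst entry is returned so callers can re-check the route.
 */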
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);