/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include "dccp.h"
static void dccp_write_timer(unsigned long data);
static void dccp_keepalive_timer(unsigned long data);
static void dccp_delack_timer(unsigned long data);
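
/* Attach the DCCP write, delayed-ACK and keepalive handlers to the
 * generic inet_connection_sock timers of this socket.
 */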
void dccp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
                                  &dccp_keepalive_timer);
}
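
/* Report a fatal write timeout: surface the pending soft error (or
 * ETIMEDOUT), send a RESET(aborted) and tear the socket down.
 */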
static void dccp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
        dccp_done(sk);
        DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}
/* A write timeout has occurred. Process the after effects. */
static int dccp_write_timeout(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;

        if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
                if (icsk->icsk_retransmits != 0)
                        dst_negative_advice(&sk->sk_dst_cache);
                retry_until = icsk->icsk_syn_retries ? :
                            /* FIXME! */ 3 /* FIXME! sysctl_tcp_syn_retries */;
        } else {
                if (icsk->icsk_retransmits >=
                    /* FIXME! sysctl_tcp_retries1 */ 5 /* FIXME! */) {
                        /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
                           black hole detection. :-(

                           This is the place to do it, but it is not done. I do
                           not want to do it. It is disgusting. It does not work
                           in any case. Let me cite the same draft, which
                           requires us to implement this:

   "The one security concern raised by this memo is that ICMP black holes
   are often caused by over-zealous security administrators who block
   all ICMP messages.  It is vitally important that those who design and
   deploy security systems understand the impact of strict filtering on
   upper-layer protocols.  The safest web site in the world is worthless
   if most TCP implementations cannot transfer data from it.  It would
   be far nicer to have all of the black holes fixed rather than fixing
   all of the TCP implementations."

                           Golden words :-).
                         */

                        dst_negative_advice(&sk->sk_dst_cache);
                }

                retry_until = /* FIXME! */ 15 /* FIXME! sysctl_tcp_retries2 */;
                /*
                 * FIXME: see tcp_write_timeout and tcp_out_of_resources
                 */
        }

        if (icsk->icsk_retransmits >= retry_until) {
                /* Has it gone just too far? */
                dccp_write_err(sk);
                return 1;
        }
        return 0;
}
/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct inet_connection_sock *icsk = inet_csk(sk);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                icsk->icsk_ack.blocked = 1;
                NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               icsk->icsk_ack.timeout);
                goto out;
        }

        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!icsk->icsk_ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
                                                 icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                dccp_send_ack(sk);
                NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
/*
 * The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        /* The retransmit timer is also used for feature negotiation
         * throughout the connection. In that case no packet is retransmitted;
         * instead an ack is generated and the pending changes are placed into
         * its options.
         */
        if (sk->sk_send_head == NULL) {
                dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk);
                if (sk->sk_state == DCCP_OPEN)
                        dccp_send_ack(sk);
                goto backoff;
        }

        /*
         * sk->sk_send_head has to have one skb with
         * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
         * packet types (REQUEST, RESPONSE, the ACK in the 3way handshake
         * (PARTOPEN timer), etc).
         */
        BUG_TRAP(sk->sk_send_head != NULL);

        /*
         * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
         * sent, no need to retransmit, this sock is dead.
         */
        if (dccp_write_timeout(sk))
                goto out;

        /*
         * We want to know the number of packets retransmitted, not the
         * total number of retransmissions of clones of original packets.
         */
        if (icsk->icsk_retransmits == 0)
                DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

        if (dccp_retransmit_skb(sk, sk->sk_send_head) < 0) {
                /*
                 * Retransmission failed because of local congestion,
                 * do not backoff.
                 */
                if (icsk->icsk_retransmits == 0)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto,
                                              TCP_RESOURCE_PROBE_INTERVAL),
                                          DCCP_RTO_MAX);
                goto out;
        }

backoff:
        icsk->icsk_backoff++;
        icsk->icsk_retransmits++;

        icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
                                  DCCP_RTO_MAX);
        if (icsk->icsk_retransmits > 3 /* FIXME: sysctl_dccp_retries1 */)
                __sk_dst_reset(sk);
out:;
}
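
/* Common write-timer handler: once the socket is free of user-space
 * ownership it dispatches whichever icsk event is pending (currently
 * only ICSK_TIME_RETRANS, handled by dccp_retransmit_timer).
 */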
static void dccp_write_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event = 0;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later */
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               jiffies + (HZ / 20));
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               icsk->icsk_timeout);
                goto out;
        }

        event = icsk->icsk_pending;
        icsk->icsk_pending = 0;

        switch (event) {
        case ICSK_TIME_RETRANS:
                dccp_retransmit_timer(sk);
                break;
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
/*
 *      Timer for listening sockets
 */
static void dccp_response_timer(struct sock *sk)
{
        inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
                                   DCCP_RTO_MAX);
}
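
/* For DCCP the keepalive timer currently only services listening sockets,
 * where it prunes timed-out connection requests from the request queue.
 */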
static void dccp_keepalive_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer(sk, HZ / 20);
                goto out;
        }

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_response_timer(sk);
                goto out;
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}