/*
 * ip_vs_proto_tcp.c:	TCP load balancing support for IPVS
 *
 * Version:     $Id: ip_vs_proto_tcp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>                  /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>                    /* for csum_tcpudp_magic */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

#include <net/ip_vs.h>

static struct ip_vs_conn *
tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
		const struct iphdr *iph, unsigned int proto_off, int inverse)
{
	__be16 _ports[2], *pptr;

	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;

	if (likely(!inverse)) {
		return ip_vs_conn_in_get(iph->protocol,
					 iph->saddr, pptr[0],
					 iph->daddr, pptr[1]);
	} else {
		return ip_vs_conn_in_get(iph->protocol,
					 iph->daddr, pptr[1],
					 iph->saddr, pptr[0]);
	}
}
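
/*
 * Note: conn_in_get() looks up the connection entry for packets
 * travelling from the client towards the virtual service, keyed on the
 * source/destination address and port pairs.  The "inverse" flag swaps
 * the two sides of the tuple for the lookup; it is used when the tuple
 * comes from a reply-direction header, e.g. one embedded in a related
 * ICMP error.
 */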

static struct ip_vs_conn *
tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
		 const struct iphdr *iph, unsigned int proto_off, int inverse)
{
	__be16 _ports[2], *pptr;

	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;

	if (likely(!inverse)) {
		return ip_vs_conn_out_get(iph->protocol,
					  iph->saddr, pptr[0],
					  iph->daddr, pptr[1]);
	} else {
		return ip_vs_conn_out_get(iph->protocol,
					  iph->daddr, pptr[1],
					  iph->saddr, pptr[0]);
	}
}

static int
tcp_conn_schedule(struct sk_buff *skb,
		  struct ip_vs_protocol *pp,
		  int *verdict, struct ip_vs_conn **cpp)
{
	struct ip_vs_service *svc;
	struct tcphdr _tcph, *th;

	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
	if (th == NULL) {
		*verdict = NF_DROP;
		return 0;
	}

	if (th->syn &&
	    (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol,
				     ip_hdr(skb)->daddr, th->dest))) {
		if (ip_vs_todrop()) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb);
		if (!*cpp) {
			*verdict = ip_vs_leave(svc, skb, pp);
			return 0;
		}
		ip_vs_service_put(svc);
	}
	return 1;
}

static inline void
tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip,
		     __be16 oldport, __be16 newport)
{
	tcph->check =
		csum_fold(ip_vs_check_diff4(oldip, newip,
				 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(tcph->check))));
}
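
/*
 * The fast path above is an incremental checksum update in the style of
 * RFC 1624: the difference between the old and new address/port is
 * folded into the complemented old checksum instead of re-summing the
 * whole segment.  Only the four changed address bytes and two changed
 * port bytes enter the computation, so the cost is constant regardless
 * of payload size.
 */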

static int
tcp_snat_handler(struct sk_buff *skb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct tcphdr *tcph;
	const unsigned int tcphoff = ip_hdrlen(skb);

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(skb, pp))
			return 0;

		/* Call application helper if needed */
		if (!ip_vs_app_pkt_out(cp, skb))
			return 0;
	}

	tcph = (void *)ip_hdr(skb) + tcphoff;
	tcph->source = cp->vport;

	/* Adjust TCP checksums */
	if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
						skb->len - tcphoff,
						cp->protocol, skb->csum);
		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, tcph->check,
			  (char*)&(tcph->check) - (char*)tcph);
	}
	return 1;
}

static int
tcp_dnat_handler(struct sk_buff *skb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct tcphdr *tcph;
	const unsigned int tcphoff = ip_hdrlen(skb);

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(skb, pp))
			return 0;

		/*
		 * Attempt ip_vs_app call.
		 * It will fix ip_vs_conn and iph ack_seq stuff
		 */
		if (!ip_vs_app_pkt_in(cp, skb))
			return 0;
	}

	tcph = (void *)ip_hdr(skb) + tcphoff;
	tcph->dest = cp->dport;

	/*
	 * Adjust TCP checksums
	 */
	if (!cp->app) {
		/* Only port and addr are changed, do fast csum update */
		tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		tcph->check = 0;
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
						skb->len - tcphoff,
						cp->protocol, skb->csum);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}
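
/*
 * snat_handler and dnat_handler are mirror images: SNAT rewrites the
 * source of server-to-client replies back to the virtual address/port,
 * while DNAT rewrites the destination of client packets to the chosen
 * real server.  After the full recomputation on the DNAT path the
 * checksum is known-good, so ip_summed is set to CHECKSUM_UNNECESSARY
 * to spare the rest of the stack a second verification.
 */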

static int
tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	const unsigned int tcphoff = ip_hdrlen(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
		/* fall through */
	case CHECKSUM_COMPLETE:
		if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				      skb->len - tcphoff,
				      ip_hdr(skb)->protocol, skb->csum)) {
			IP_VS_DBG_RL_PKT(0, pp, skb, 0,
					 "Failed checksum for");
			return 0;
		}
		break;
	default:
		/* No need to checksum. */
		break;
	}

	return 1;
}
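
/*
 * CHECKSUM_NONE deliberately falls through: once skb->csum holds a
 * freshly computed sum over the TCP segment, validation is identical to
 * the CHECKSUM_COMPLETE case.  The default branch covers skbs whose
 * checksum was already verified (or will be filled in by hardware), so
 * no software check is needed there.
 */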

#define TCP_DIR_INPUT		0
#define TCP_DIR_OUTPUT		4
#define TCP_DIR_INPUT_ONLY	8

static const int tcp_state_off[IP_VS_DIR_LAST] = {
	[IP_VS_DIR_INPUT]		=	TCP_DIR_INPUT,
	[IP_VS_DIR_OUTPUT]		=	TCP_DIR_OUTPUT,
	[IP_VS_DIR_INPUT_ONLY]		=	TCP_DIR_INPUT_ONLY,
};
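
/*
 * The offsets are row indexes into the 12-row state tables below: each
 * direction owns a bank of four rows (syn, fin, ack, rst), so the
 * transition row for a packet is tcp_state_table[state_off + state_idx].
 */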

/*
 *	Timeout table[state]
 */
static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	2*HZ,
	[IP_VS_TCP_S_ESTABLISHED]	=	15*60*HZ,
	[IP_VS_TCP_S_SYN_SENT]		=	2*60*HZ,
	[IP_VS_TCP_S_SYN_RECV]		=	1*60*HZ,
	[IP_VS_TCP_S_FIN_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_TIME_WAIT]		=	2*60*HZ,
	[IP_VS_TCP_S_CLOSE]		=	10*HZ,
	[IP_VS_TCP_S_CLOSE_WAIT]	=	60*HZ,
	[IP_VS_TCP_S_LAST_ACK]		=	30*HZ,
	[IP_VS_TCP_S_LISTEN]		=	2*60*HZ,
	[IP_VS_TCP_S_SYNACK]		=	120*HZ,
	[IP_VS_TCP_S_LAST]		=	2*HZ,
};
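
/*
 * Values are in jiffies (HZ ticks per second): an idle ESTABLISHED
 * entry is kept for 15 minutes, a half-open SYN_RECV one for only a
 * minute.  The established/FIN-wait/UDP timeouts can be tuned at
 * runtime (e.g. "ipvsadm --set 900 120 300").
 */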

static char * tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
	[IP_VS_TCP_S_NONE]		=	"NONE",
	[IP_VS_TCP_S_ESTABLISHED]	=	"ESTABLISHED",
	[IP_VS_TCP_S_SYN_SENT]		=	"SYN_SENT",
	[IP_VS_TCP_S_SYN_RECV]		=	"SYN_RECV",
	[IP_VS_TCP_S_FIN_WAIT]		=	"FIN_WAIT",
	[IP_VS_TCP_S_TIME_WAIT]		=	"TIME_WAIT",
	[IP_VS_TCP_S_CLOSE]		=	"CLOSE",
	[IP_VS_TCP_S_CLOSE_WAIT]	=	"CLOSE_WAIT",
	[IP_VS_TCP_S_LAST_ACK]		=	"LAST_ACK",
	[IP_VS_TCP_S_LISTEN]		=	"LISTEN",
	[IP_VS_TCP_S_SYNACK]		=	"SYNACK",
	[IP_VS_TCP_S_LAST]		=	"BUG!",
};

#define sNO IP_VS_TCP_S_NONE
#define sES IP_VS_TCP_S_ESTABLISHED
#define sSS IP_VS_TCP_S_SYN_SENT
#define sSR IP_VS_TCP_S_SYN_RECV
#define sFW IP_VS_TCP_S_FIN_WAIT
#define sTW IP_VS_TCP_S_TIME_WAIT
#define sCL IP_VS_TCP_S_CLOSE
#define sCW IP_VS_TCP_S_CLOSE_WAIT
#define sLA IP_VS_TCP_S_LAST_ACK
#define sLI IP_VS_TCP_S_LISTEN
#define sSA IP_VS_TCP_S_SYNACK

struct tcp_states_t {
	int next_state[IP_VS_TCP_S_LAST];
};

static const char * tcp_state_name(int state)
{
	if (state >= IP_VS_TCP_S_LAST)
		return "ERR!";
	return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}

static struct tcp_states_t tcp_states [] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
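
/*
 * Read the tables as next_state = row[current_state]: the row is picked
 * by direction bank plus packet flag (syn/fin/ack/rst), the column by
 * the connection's current state.  For example, a client SYN arriving
 * on a connection in sNO (row "syn" of the INPUT bank) moves it to sSR,
 * i.e. SYN_RECV.
 */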

static struct tcp_states_t tcp_states_dos [] = {
/*	INPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},

/*	OUTPUT */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*	INPUT-ONLY */
/*        sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA	*/
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
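
/*
 * The "dos" variant is selected when secure_tcp is on.  It is
 * deliberately more pessimistic about half-open connections: note the
 * INPUT ack row, where a bare ACK in sSR keeps the entry in SYN_RECV
 * instead of promoting it to ESTABLISHED, so spoofed SYN floods cannot
 * inflate the active connection counts.
 */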

static struct tcp_states_t *tcp_state_table = tcp_states;

static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
{
	int on = (flags & 1);		/* secure_tcp */

	/*
	** FIXME: change secure_tcp to independent sysctl var
	** or make it per-service or per-app because it is valid
	** for most if not for all of the applications. Something
	** like "capabilities" (flags) for each object.
	*/
	tcp_state_table = (on? tcp_states_dos : tcp_states);
}

static int
tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
{
	return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
				       tcp_state_name_table, sname, to);
}

static inline int tcp_state_idx(struct tcphdr *th)
{
	if (th->rst)
		return 3;
	if (th->syn)
		return 0;
	if (th->fin)
		return 1;
	if (th->ack)
		return 2;
	return -1;
}
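
/*
 * The returned index matches the row order of the tables above
 * (0 = syn, 1 = fin, 2 = ack, 3 = rst).  RST is tested first so that a
 * RST+ACK segment is classified as a reset; -1 (no recognised flag)
 * makes set_tcp_state() fall back to the CLOSE state.
 */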

static inline void
set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
	      int direction, struct tcphdr *th)
{
	int state_idx;
	int new_state = IP_VS_TCP_S_CLOSE;
	int state_off = tcp_state_off[direction];

	/*
	 *    Update state offset to INPUT_ONLY if necessary
	 *    or delete NO_OUTPUT flag if output packet detected
	 */
	if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
		if (state_off == TCP_DIR_OUTPUT)
			cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
		else
			state_off = TCP_DIR_INPUT_ONLY;
	}

	if ((state_idx = tcp_state_idx(th)) < 0) {
		IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
		goto tcp_state_out;
	}

	new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];

  tcp_state_out:
	if (new_state != cp->state) {
		struct ip_vs_dest *dest = cp->dest;

		IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->"
			  "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n",
			  pp->name,
			  (state_off==TCP_DIR_OUTPUT)?"output ":"input ",
			  th->syn? 'S' : '.',
			  th->fin? 'F' : '.',
			  th->ack? 'A' : '.',
			  th->rst? 'R' : '.',
			  NIPQUAD(cp->daddr), ntohs(cp->dport),
			  NIPQUAD(cp->caddr), ntohs(cp->cport),
			  tcp_state_name(cp->state),
			  tcp_state_name(new_state),
			  atomic_read(&cp->refcnt));
		if (dest) {
			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
			    (new_state != IP_VS_TCP_S_ESTABLISHED)) {
				atomic_dec(&dest->activeconns);
				atomic_inc(&dest->inactconns);
				cp->flags |= IP_VS_CONN_F_INACTIVE;
			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
				   (new_state == IP_VS_TCP_S_ESTABLISHED)) {
				atomic_inc(&dest->activeconns);
				atomic_dec(&dest->inactconns);
				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
			}
		}
	}

	cp->timeout = pp->timeout_table[cp->state = new_state];
}
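
/*
 * The activeconns/inactconns bookkeeping above is what the connection
 * schedulers (e.g. wlc) consult: only fully ESTABLISHED connections
 * count as active, so a server buried under half-open or closing
 * connections does not look busier than it really is.
 */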

/*
 *	Handle state transitions
 */
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
		     const struct sk_buff *skb,
		     struct ip_vs_protocol *pp)
{
	struct tcphdr _tcph, *th;

	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
	if (th == NULL)
		return 0;

	spin_lock(&cp->lock);
	set_tcp_state(pp, cp, direction, th);
	spin_unlock(&cp->lock);

	return 1;
}

/*
 *	Hash table for TCP application incarnations
 */
#define	TCP_APP_TAB_BITS	4
#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)

static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
static DEFINE_SPINLOCK(tcp_app_lock);

static inline __u16 tcp_app_hashkey(__be16 port)
{
	return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
		& TCP_APP_TAB_MASK;
}
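
/*
 * The hash key folds the upper bits of the port onto the lower ones
 * before masking into the 16 buckets, so ports that differ only above
 * bit 4 still spread out.  E.g. port 21 (0x0015) hashes to
 * ((0x0015 >> 4) ^ 0x0015) & 0xf = 0x4.
 */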

static int tcp_register_app(struct ip_vs_app *inc)
{
	struct ip_vs_app *i;
	__u16 hash;
	__be16 port = inc->port;
	int ret = 0;

	hash = tcp_app_hashkey(port);

	spin_lock_bh(&tcp_app_lock);
	list_for_each_entry(i, &tcp_apps[hash], p_list) {
		if (i->port == port) {
			ret = -EEXIST;
			goto out;
		}
	}
	list_add(&inc->p_list, &tcp_apps[hash]);
	atomic_inc(&ip_vs_protocol_tcp.appcnt);

  out:
	spin_unlock_bh(&tcp_app_lock);
	return ret;
}

static void
tcp_unregister_app(struct ip_vs_app *inc)
{
	spin_lock_bh(&tcp_app_lock);
	atomic_dec(&ip_vs_protocol_tcp.appcnt);
	list_del(&inc->p_list);
	spin_unlock_bh(&tcp_app_lock);
}

static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = tcp_app_hashkey(cp->vport);

	spin_lock(&tcp_app_lock);
	list_for_each_entry(inc, &tcp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			spin_unlock(&tcp_app_lock);

			IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->"
				  "%u.%u.%u.%u:%u to app %s on port %u\n",
				  __func__,
				  NIPQUAD(cp->caddr), ntohs(cp->cport),
				  NIPQUAD(cp->vaddr), ntohs(cp->vport),
				  inc->name, ntohs(inc->port));
			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	spin_unlock(&tcp_app_lock);

  out:
	return result;
}

/*
 *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
 */
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
{
	spin_lock(&cp->lock);
	cp->state = IP_VS_TCP_S_LISTEN;
	cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
	spin_unlock(&cp->lock);
}
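
/*
 * This helper is used by application modules: the FTP helper, for
 * instance, marks the expected passive-mode data connection as LISTEN
 * so it survives until the client actually connects.
 */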

static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
{
	IP_VS_INIT_HASH_TABLE(tcp_apps);
	pp->timeout_table = tcp_timeouts;
}

static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
{
}

struct ip_vs_protocol ip_vs_protocol_tcp = {
	.name =			"TCP",
	.protocol =		IPPROTO_TCP,
	.dont_defrag =		0,
	.appcnt =		ATOMIC_INIT(0),
	.init =			ip_vs_tcp_init,
	.exit =			ip_vs_tcp_exit,
	.register_app =		tcp_register_app,
	.unregister_app =	tcp_unregister_app,
	.conn_schedule =	tcp_conn_schedule,
	.conn_in_get =		tcp_conn_in_get,
	.conn_out_get =		tcp_conn_out_get,
	.snat_handler =		tcp_snat_handler,
	.dnat_handler =		tcp_dnat_handler,
	.csum_check =		tcp_csum_check,
	.state_name =		tcp_state_name,
	.state_transition =	tcp_state_transition,
	.app_conn_bind =	tcp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	tcp_timeout_change,
	.set_state_timeout =	tcp_set_state_timeout,
};