1 /* IPSec ESP and AH support.
2 Copyright (c) 1999 Pierre Beyssac
3 Copyright (C) 2002 Geoffrey Keating
4 Copyright (C) 2003-2007 Maurice Massar
5 Copyright (C) 2004 Tomas Mraz
6 Copyright (C) 2005 Michael Tilstra
7 Copyright (C) 2006 Daniel Roethlisberger
8 Copyright (C) 2007 Paolo Zarpellon (tap+Cygwin support)
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 /* borrowed from pipsecd (-; */
30 * Copyright (c) 1999 Pierre Beyssac
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 #include <sys/types.h>
57 #include <sys/socket.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
67 #include <netinet/ip_icmp.h>
69 #include <arpa/inet.h>
75 #include <sys/select.h>
82 #if !defined(__sun__) && !defined(__SKYOS__)
94 #define MAX(a,b) ((a)>(b)?(a):(b))
98 #define FD_COPY(f, t) ((void)memcpy((t), (f), sizeof(*(f))))
/* A real ESP header (RFC 2406): fixed 8-byte prefix of every ESP packet. */
typedef struct esp_encap_header {
	uint32_t spi;		/* security parameters index */
	uint32_t seq_id;	/* sequence id (unimplemented) */
	/* variable-length payload data + padding */
	/* unsigned char next_header */
	/* optional auth data */
} __attribute__((packed)) esp_encap_header_t;
/*
 * Dispatch table for one encapsulation flavour (raw-IP ESP or UDP ESP).
 * recv reads one packet from the socket into buf; send_peer encapsulates
 * and transmits; recv_peer authenticates/decrypts the packet last read.
 */
struct encap_method {
	int fixed_header_size;

	int (*recv) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	void (*send_peer) (struct sa_block *s, unsigned char *buf, unsigned int bufsize);
	int (*recv_peer) (struct sa_block *s);
};
/* Yuck! Global variables... */

#define MAX_HEADER 72
#define MAX_PACKET 4096
int volatile do_kill;
/* one static buffer per direction; MAX_HEADER headroom for encapsulation,
 * ETH_HLEN extra for the tap-mode ethernet frame header */
static uint8_t global_buffer_rx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
static uint8_t global_buffer_tx[MAX_HEADER + MAX_PACKET + ETH_HLEN];
/*
 * Checksum routine for Internet Protocol family headers (C Version).
 *
 * RFC 1071 one's-complement sum: 16-bit words are accumulated into a
 * 32-bit-capable accumulator, carries are folded back into the low 16
 * bits, and the result is complemented.
 *
 * Portability fix: uses standard `unsigned short`/`unsigned char`
 * (identical types to the non-standard BSD u_short/u_char used before)
 * and drops the obsolete `register` qualifier.
 *
 * addr: start of data (16-bit aligned)
 * len:  length in bytes; an odd trailing byte is zero-padded
 * returns the 16-bit one's-complement checksum
 */
static unsigned short in_cksum(unsigned short *addr, int len)
{
	int nleft = len;
	unsigned short *w = addr;
	int sum = 0;
	unsigned short answer = 0;

	/*
	 * Our algorithm is simple, using a 32 bit accumulator (sum), we add
	 * sequential 16 bit words to it, and at the end, fold back all the
	 * carry bits from the top 16 bits into the lower 16 bits.
	 */
	while (nleft > 1) {
		sum += *w++;
		nleft -= 2;
	}

	/* mop up an odd byte, if necessary */
	if (nleft == 1) {
		*(unsigned char *)(&answer) = *(unsigned char *)w;
		sum += answer;
	}

	/* add back carry outs from top 16 bits to low 16 bits */
	sum = (sum >> 16) + (sum & 0xffff);	/* add hi 16 to low 16 */
	sum += (sum >> 16);	/* add carry */
	answer = ~sum;		/* truncate to 16 bits */
	return answer;
}
161 * Decapsulate from a raw IP packet
163 static int encap_rawip_recv(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
166 struct ip
*p
= (struct ip
*)buf
;
167 struct sockaddr_in from
;
168 socklen_t fromlen
= sizeof(from
);
170 r
= recvfrom(s
->esp_fd
, buf
, bufsize
, 0, (struct sockaddr
*)&from
, &fromlen
);
172 logmsg(LOG_ERR
, "recvfrom: %m");
175 if (from
.sin_addr
.s_addr
!= s
->dst
.s_addr
) {
176 logmsg(LOG_ALERT
, "packet from unknown host %s", inet_ntoa(from
.sin_addr
));
179 if (r
< (p
->ip_hl
<< 2) + s
->ipsec
.em
->fixed_header_size
) {
180 logmsg(LOG_ALERT
, "packet too short. got %zd, expected %d", r
, (p
->ip_hl
<< 2) + s
->ipsec
.em
->fixed_header_size
);
184 #ifdef NEED_IPLEN_FIX
187 p
->ip_len
= ntohs(r
);
190 s
->ipsec
.rx
.buf
= buf
;
191 s
->ipsec
.rx
.buflen
= r
;
192 s
->ipsec
.rx
.bufpayload
= (p
->ip_hl
<< 2);
193 s
->ipsec
.rx
.bufsize
= bufsize
;
198 * Decapsulate from an UDP packet
200 static int encap_udp_recv(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
204 r
= recv(s
->esp_fd
, buf
, bufsize
, 0);
206 logmsg(LOG_ERR
, "recvfrom: %m");
209 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
&& r
> 8) {
211 memmove(buf
, buf
+ 8, r
);
213 if( r
== 1 && *buf
== 0xff )
215 DEBUGTOP(1, printf("UDP NAT keepalive packet received\n"));
218 if (r
< s
->ipsec
.em
->fixed_header_size
) {
219 logmsg(LOG_ALERT
, "packet too short from %s. got %zd, expected %d",
220 inet_ntoa(s
->dst
), r
, s
->ipsec
.em
->fixed_header_size
);
224 s
->ipsec
.rx
.buf
= buf
;
225 s
->ipsec
.rx
.buflen
= r
;
226 s
->ipsec
.rx
.bufpayload
= 0;
227 s
->ipsec
.rx
.bufsize
= bufsize
;
234 static int encap_any_decap(struct sa_block
*s
)
236 s
->ipsec
.rx
.buflen
-= s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
;
237 s
->ipsec
.rx
.buf
+= s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
;
238 if (s
->ipsec
.rx
.buflen
== 0)
244 * Send decapsulated packet to tunnel device
246 static int tun_send_ip(struct sa_block
*s
)
251 start
= s
->ipsec
.rx
.buf
;
252 len
= s
->ipsec
.rx
.buflen
;
254 if (opt_if_mode
== IF_MODE_TAP
) {
257 * Add ethernet header before s->ipsec.rx.buf where
258 * at least ETH_HLEN bytes should be available.
260 struct ether_header
*eth_hdr
= (struct ether_header
*) (s
->ipsec
.rx
.buf
- ETH_HLEN
);
262 memcpy(eth_hdr
->ether_dhost
, s
->tun_hwaddr
, ETH_ALEN
);
263 memcpy(eth_hdr
->ether_shost
, s
->tun_hwaddr
, ETH_ALEN
);
265 /* Use a different MAC as source */
266 eth_hdr
->ether_shost
[0] ^= 0x80; /* toggle some visible bit */
267 eth_hdr
->ether_type
= htons(ETHERTYPE_IP
);
269 start
= (uint8_t *) eth_hdr
;
274 sent
= tun_write(s
->tun_fd
, start
, len
);
276 logmsg(LOG_ERR
, "truncated in: %d -> %d\n", len
, sent
);
277 hex_dump("Tx pkt", start
, len
, NULL
);
282 * Compute HMAC for an arbitrary stream of bytes
284 static int hmac_compute(int md_algo
,
285 const unsigned char *data
, unsigned int data_size
,
286 unsigned char *digest
, unsigned char do_store
,
287 const unsigned char *secret
, unsigned short secret_size
)
291 unsigned char *hmac_digest
;
292 unsigned int hmac_len
;
295 gcry_md_open(&md_ctx
, md_algo
, GCRY_MD_FLAG_HMAC
);
296 assert(md_ctx
!= NULL
);
297 ret
= gcry_md_setkey(md_ctx
, secret
, secret_size
);
299 gcry_md_write(md_ctx
, data
, data_size
);
300 gcry_md_final(md_ctx
);
301 hmac_digest
= gcry_md_read(md_ctx
, 0);
302 hmac_len
= 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
305 memcpy(digest
, hmac_digest
, hmac_len
);
308 ret
= memcmp(digest
, hmac_digest
, hmac_len
);
310 gcry_md_close(md_ctx
);
315 * Encapsulate a packet in ESP
317 static void encap_esp_encapsulate(struct sa_block
*s
)
319 esp_encap_header_t
*eh
;
320 unsigned char *iv
, *cleartext
;
321 size_t i
, padding
, pad_blksz
;
322 unsigned int cleartextlen
;
325 * Add padding as necessary
327 * done: this should be checked, RFC 2406 section 2.4 is quite
328 * obscure on that point.
331 pad_blksz
= s
->ipsec
.blk_len
;
332 while (pad_blksz
& 3) /* must be multiple of 4 */
334 padding
= pad_blksz
- ((s
->ipsec
.tx
.buflen
+ 2 - s
->ipsec
.tx
.var_header_size
- s
->ipsec
.tx
.bufpayload
) % pad_blksz
);
335 DEBUG(3, printf("sending packet: len = %d, padding = %lu\n", s
->ipsec
.tx
.buflen
, (unsigned long)padding
));
336 if (padding
== pad_blksz
)
339 for (i
= 1; i
<= padding
; i
++) {
340 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
] = i
;
341 s
->ipsec
.tx
.buflen
++;
344 /* Add trailing padlen and next_header */
345 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
++] = padding
;
346 s
->ipsec
.tx
.buf
[s
->ipsec
.tx
.buflen
++] = IPPROTO_IPIP
;
348 cleartext
= s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.var_header_size
+ s
->ipsec
.tx
.bufpayload
;
349 cleartextlen
= s
->ipsec
.tx
.buflen
- s
->ipsec
.tx
.var_header_size
- s
->ipsec
.tx
.bufpayload
;
351 eh
= (esp_encap_header_t
*) (s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
);
352 eh
->spi
= s
->ipsec
.tx
.spi
;
353 eh
->seq_id
= htonl(s
->ipsec
.tx
.seq_id
++);
355 /* Copy initialization vector in packet */
356 iv
= (unsigned char *)(eh
+ 1);
357 gcry_create_nonce(iv
, s
->ipsec
.iv_len
);
358 hex_dump("iv", iv
, s
->ipsec
.iv_len
, NULL
);
360 hex_dump("sending ESP packet (before crypt)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
362 if (s
->ipsec
.cry_algo
) {
363 gcry_cipher_setiv(s
->ipsec
.tx
.cry_ctx
, iv
, s
->ipsec
.iv_len
);
364 gcry_cipher_encrypt(s
->ipsec
.tx
.cry_ctx
, cleartext
, cleartextlen
, NULL
, 0);
367 hex_dump("sending ESP packet (after crypt)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
369 /* Handle optional authentication field */
370 if (s
->ipsec
.md_algo
) {
371 hmac_compute(s
->ipsec
.md_algo
,
372 s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
,
373 s
->ipsec
.tx
.var_header_size
+ cleartextlen
,
374 s
->ipsec
.tx
.buf
+ s
->ipsec
.tx
.bufpayload
375 + s
->ipsec
.tx
.var_header_size
+ cleartextlen
,
376 1, s
->ipsec
.tx
.key_md
, s
->ipsec
.md_len
);
377 s
->ipsec
.tx
.buflen
+= 12; /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
378 hex_dump("sending ESP packet (after ah)", s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, NULL
);
383 * Encapsulate a packet in IP ESP and send to the peer.
384 * "buf" should have exactly MAX_HEADER free bytes at its beginning
385 * to account for encapsulation data (not counted in "size").
387 static void encap_esp_send_peer(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
391 struct sockaddr_in dstaddr
;
395 /* Keep a pointer to the old IP header */
396 tip
= (struct ip
*)buf
;
398 s
->ipsec
.tx
.buf
= buf
;
399 s
->ipsec
.tx
.buflen
= bufsize
;
401 /* Prepend our encapsulation header and new IP header */
402 s
->ipsec
.tx
.var_header_size
= (s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.iv_len
);
404 s
->ipsec
.tx
.buf
-= sizeof(struct ip
) + s
->ipsec
.tx
.var_header_size
;
405 s
->ipsec
.tx
.buflen
+= sizeof(struct ip
) + s
->ipsec
.tx
.var_header_size
;
407 s
->ipsec
.tx
.bufpayload
= sizeof(struct ip
);
409 /* Fill non-mutable fields */
412 /*gcry_md_get_algo_dlen(md_algo); see RFC .. only use 96 bit */
413 ip
.ip_id
= htons(s
->ipsec
.ip_id
++);
414 ip
.ip_p
= IPPROTO_ESP
;
418 /* Fill mutable fields */
419 ip
.ip_tos
= (bufsize
< sizeof(struct ip
)) ? 0 : tip
->ip_tos
;
421 ip
.ip_ttl
= IPDEFTTL
;
424 encap_esp_encapsulate(s
);
426 ip
.ip_len
= s
->ipsec
.tx
.buflen
;
427 #ifdef NEED_IPLEN_FIX
428 ip
.ip_len
= htons(ip
.ip_len
);
430 ip
.ip_sum
= in_cksum((u_short
*) s
->ipsec
.tx
.buf
, sizeof(struct ip
));
432 memcpy(s
->ipsec
.tx
.buf
, &ip
, sizeof ip
);
434 dstaddr
.sin_family
= AF_INET
;
435 dstaddr
.sin_addr
= s
->dst
;
436 dstaddr
.sin_port
= 0;
437 sent
= sendto(s
->esp_fd
, s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, 0, (struct sockaddr
*)&dstaddr
, sizeof(struct sockaddr_in
));
439 logmsg(LOG_ERR
, "esp sendto: %m");
442 if (sent
!= s
->ipsec
.tx
.buflen
)
443 logmsg(LOG_ALERT
, "esp truncated out (%lld out of %d)", (long long)sent
, s
->ipsec
.tx
.buflen
);
447 * Encapsulate a packet in UDP ESP and send to the peer.
448 * "buf" should have exactly MAX_HEADER free bytes at its beginning
449 * to account for encapsulation data (not counted in "size").
451 static void encap_udp_send_peer(struct sa_block
*s
, unsigned char *buf
, unsigned int bufsize
)
457 s
->ipsec
.tx
.buf
= buf
;
458 s
->ipsec
.tx
.buflen
= bufsize
;
460 /* Prepend our encapsulation header and new IP header */
461 s
->ipsec
.tx
.var_header_size
= (s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.iv_len
);
463 s
->ipsec
.tx
.buf
-= s
->ipsec
.tx
.var_header_size
;
464 s
->ipsec
.tx
.buflen
+= s
->ipsec
.tx
.var_header_size
;
466 s
->ipsec
.tx
.bufpayload
= 0;
468 encap_esp_encapsulate(s
);
470 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
) {
471 s
->ipsec
.tx
.buf
-= 8;
472 s
->ipsec
.tx
.buflen
+= 8;
473 memset(s
->ipsec
.tx
.buf
, 0, 8);
476 sent
= send(s
->esp_fd
, s
->ipsec
.tx
.buf
, s
->ipsec
.tx
.buflen
, 0);
478 logmsg(LOG_ERR
, "udp sendto: %m");
481 if (sent
!= s
->ipsec
.tx
.buflen
)
482 logmsg(LOG_ALERT
, "udp truncated out (%lld out of %d)",
483 (long long)sent
, s
->ipsec
.tx
.buflen
);
486 static int encap_esp_recv_peer(struct sa_block
*s
)
490 unsigned char padlen
, next_header
;
494 s
->ipsec
.rx
.var_header_size
= s
->ipsec
.iv_len
;
495 iv
= s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
;
497 len
= s
->ipsec
.rx
.buflen
- s
->ipsec
.rx
.bufpayload
- s
->ipsec
.em
->fixed_header_size
- s
->ipsec
.rx
.var_header_size
;
500 logmsg(LOG_ALERT
, "Packet too short");
504 /* Handle optional authentication field */
505 if (s
->ipsec
.md_algo
) {
506 len
-= 12; /*gcry_md_get_algo_dlen(peer->local_sa->md_algo); */
507 s
->ipsec
.rx
.buflen
-= 12;
508 if (hmac_compute(s
->ipsec
.md_algo
,
509 s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
,
510 s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
,
511 s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
512 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
,
515 s
->ipsec
.md_len
) != 0) {
516 logmsg(LOG_ALERT
, "HMAC mismatch in ESP mode");
521 blksz
= s
->ipsec
.blk_len
;
522 if (s
->ipsec
.cry_algo
&& ((len
% blksz
) != 0)) {
524 "payload len %d not a multiple of algorithm block size %lu", len
,
525 (unsigned long)blksz
);
529 hex_dump("receiving ESP packet (before decrypt)",
530 &s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+
531 s
->ipsec
.rx
.var_header_size
], len
, NULL
);
533 if (s
->ipsec
.cry_algo
) {
536 data
= (s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
537 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
);
538 gcry_cipher_setiv(s
->ipsec
.rx
.cry_ctx
, iv
, s
->ipsec
.iv_len
);
539 gcry_cipher_decrypt(s
->ipsec
.rx
.cry_ctx
, data
, len
, NULL
, 0);
542 hex_dump("receiving ESP packet (after decrypt)",
543 &s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
+ s
->ipsec
.em
->fixed_header_size
+
544 s
->ipsec
.rx
.var_header_size
], len
, NULL
);
546 padlen
= s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
547 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
- 2];
548 next_header
= s
->ipsec
.rx
.buf
[s
->ipsec
.rx
.bufpayload
549 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
- 1];
551 if (padlen
+ 2 > len
) {
552 logmsg(LOG_ALERT
, "Inconsistent padlen");
555 if (next_header
!= IPPROTO_IPIP
) {
556 logmsg(LOG_ALERT
, "Inconsistent next_header %d", next_header
);
559 DEBUG(3, printf("pad len: %d, next_header: %d\n", padlen
, next_header
));
562 s
->ipsec
.rx
.buflen
-= padlen
+ 2;
565 pad
= s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
566 + s
->ipsec
.em
->fixed_header_size
+ s
->ipsec
.rx
.var_header_size
+ len
;
567 for (i
= 1; i
<= padlen
; i
++) {
569 logmsg(LOG_ALERT
, "Bad padding");
578 static void encap_esp_new(struct encap_method
*encap
)
580 encap
->recv
= encap_rawip_recv
;
581 encap
->send_peer
= encap_esp_send_peer
;
582 encap
->recv_peer
= encap_esp_recv_peer
;
583 encap
->fixed_header_size
= sizeof(esp_encap_header_t
);
586 static void encap_udp_new(struct encap_method
*encap
)
588 encap
->recv
= encap_udp_recv
;
589 encap
->send_peer
= encap_udp_send_peer
;
590 encap
->recv_peer
= encap_esp_recv_peer
;
591 encap
->fixed_header_size
= sizeof(esp_encap_header_t
);
596 * Return 1 if packet has been processed, 0 otherwise
598 static int process_arp(struct sa_block
*s
, uint8_t *frame
)
603 struct ether_header
*eth
= (struct ether_header
*) frame
;
604 struct ether_arp
*arp
= (struct ether_arp
*) (frame
+ ETH_HLEN
);
606 if (ntohs(eth
->ether_type
) != ETHERTYPE_ARP
) {
610 if (ntohs(arp
->arp_hrd
) != ARPHRD_ETHER
||
611 ntohs(arp
->arp_pro
) != 0x800 ||
612 arp
->arp_hln
!= ETH_ALEN
||
614 ntohs(arp
->arp_op
) != ARPOP_REQUEST
||
615 !memcmp(arp
->arp_spa
, arp
->arp_tpa
, 4) ||
616 memcmp(eth
->ether_shost
, s
->tun_hwaddr
, ETH_ALEN
) ||
617 !memcmp(arp
->arp_tpa
, &s
->our_address
, 4)) {
618 /* whatever .. just drop it */
624 memcpy(eth
->ether_dhost
, s
->tun_hwaddr
, ETH_ALEN
);
625 eth
->ether_shost
[0] ^= 0x80; /* Use a different MAC as source */
627 memcpy(tmp
, arp
->arp_spa
, 4);
628 memcpy(arp
->arp_spa
, arp
->arp_tpa
, 4);
629 memcpy(arp
->arp_tpa
, tmp
, 4);
631 memcpy(arp
->arp_tha
, s
->tun_hwaddr
, ETH_ALEN
);
632 arp
->arp_sha
[0] ^= 0x80; /* Use a different MAC as source */
634 arp
->arp_op
= htons(ARPOP_REPLY
);
636 frame_size
= ETH_HLEN
+ sizeof(struct ether_arp
);
637 tun_write(s
->tun_fd
, frame
, frame_size
);
638 hex_dump("ARP reply", frame
, frame_size
, NULL
);
/*
 * Process non-IP packets
 * Return 1 if packet has been processed (i.e. dropped), 0 otherwise
 */
static int process_non_ip(uint8_t *frame)
{
	struct ether_header *eth = (struct ether_header *) frame;

	if (ntohs(eth->ether_type) != ETHERTYPE_IP) {
		/* drop non-ip traffic */
		return 1;
	}

	return 0;
}
664 static void process_tun(struct sa_block
*s
)
667 int size
= MAX_PACKET
;
668 uint8_t *start
= global_buffer_rx
+ MAX_HEADER
;
670 if (opt_if_mode
== IF_MODE_TAP
) {
671 /* Make sure IP packet starts at buf + MAX_HEADER */
676 /* Receive a packet from the tunnel interface */
677 pack
= tun_read(s
->tun_fd
, start
, size
);
679 hex_dump("Rx pkt", start
, pack
, NULL
);
681 if (opt_if_mode
== IF_MODE_TAP
) {
682 if (process_arp(s
, start
)) {
685 if (process_non_ip(start
)) {
692 logmsg(LOG_ERR
, "read: %m");
696 /* Don't access the contents of the buffer other than byte aligned.
697 * 12: Offset of ip source address in ip header,
698 * 4: Length of IP address */
699 if (!memcmp(global_buffer_rx
+ MAX_HEADER
+ 12, &s
->dst
.s_addr
, 4)) {
700 logmsg(LOG_ALERT
, "routing loop to %s",
705 /* Encapsulate and send to the other end of the tunnel */
706 s
->ipsec
.life
.tx
+= pack
;
707 s
->ipsec
.em
->send_peer(s
, global_buffer_rx
, pack
);
710 static void process_socket(struct sa_block
*s
)
712 /* Receive a packet from a socket */
714 uint8_t *start
= global_buffer_tx
;
715 esp_encap_header_t
*eh
;
717 if (opt_if_mode
== IF_MODE_TAP
) {
721 pack
= s
->ipsec
.em
->recv(s
, start
, MAX_HEADER
+ MAX_PACKET
);
725 eh
= (esp_encap_header_t
*) (s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
);
727 process_late_ike(s
, s
->ipsec
.rx
.buf
+ s
->ipsec
.rx
.bufpayload
+ 4 /* SPI-size */,
728 s
->ipsec
.rx
.buflen
- s
->ipsec
.rx
.bufpayload
- 4);
730 } else if (eh
->spi
!= s
->ipsec
.rx
.spi
) {
731 logmsg(LOG_NOTICE
, "unknown spi %#08x from peer", ntohl(eh
->spi
));
735 /* Check auth digest and/or decrypt */
736 if (s
->ipsec
.em
->recv_peer(s
) != 0)
739 if (encap_any_decap(s
) == 0) {
740 logmsg(LOG_DEBUG
, "received update probe from peer");
742 /* Send the decapsulated packet to the tunnel interface */
743 s
->ipsec
.life
.rx
+= s
->ipsec
.rx
.buflen
;
748 #if defined(__CYGWIN__)
749 static void *tun_thread (void *arg
)
751 struct sa_block
*s
= (struct sa_block
*) arg
;
760 static void vpnc_main_loop(struct sa_block
*s
)
764 int enable_keepalives
;
767 struct timeval select_timeout
;
768 struct timeval normal_timeout
;
769 time_t next_ike_keepalive
=0;
770 time_t next_ike_dpd
=0;
771 #if defined(__CYGWIN__)
775 /* non-esp marker, nat keepalive payload (0xFF) */
776 uint8_t keepalive_v2
[5] = { 0x00, 0x00, 0x00, 0x00, 0xFF };
777 uint8_t keepalive_v1
[1] = { 0xFF };
779 size_t keepalive_size
;
781 if (s
->ipsec
.natt_active_mode
== NATT_ACTIVE_DRAFT_OLD
) {
782 keepalive
= keepalive_v1
;
783 keepalive_size
= sizeof(keepalive_v1
);
784 } else { /* active_mode is either RFC or CISCO_UDP */
785 keepalive
= keepalive_v2
;
786 keepalive_size
= sizeof(keepalive_v2
);
789 /* send keepalives if UDP encapsulation is enabled */
790 enable_keepalives
= (s
->ipsec
.encap_mode
!= IPSEC_ENCAP_TUNNEL
);
792 /* regular wakeups if keepalives on ike or dpd active */
793 timed_mode
= ((enable_keepalives
&& s
->ike_fd
!= s
->esp_fd
) || s
->ike
.do_dpd
);
797 #if !defined(__CYGWIN__)
798 FD_SET(s
->tun_fd
, &rfds
);
799 nfds
= MAX(nfds
, s
->tun_fd
+1);
802 FD_SET(s
->esp_fd
, &rfds
);
803 nfds
= MAX(nfds
, s
->esp_fd
+1);
805 if (s
->ike_fd
!= s
->esp_fd
) {
806 FD_SET(s
->ike_fd
, &rfds
);
807 nfds
= MAX(nfds
, s
->ike_fd
+1);
810 #if defined(__CYGWIN__)
811 if (pthread_create(&tid
, NULL
, tun_thread
, s
)) {
812 logmsg(LOG_ERR
, "Cannot create tun thread!\n");
817 normal_timeout
.tv_sec
= 86400;
818 normal_timeout
.tv_usec
= 0;
821 /* send initial dpd request */
822 next_ike_dpd
= time(NULL
) + s
->ike
.dpd_idle
;
824 normal_timeout
.tv_sec
= s
->ike
.dpd_idle
;
825 normal_timeout
.tv_usec
= 0;
828 if (enable_keepalives
) {
829 normal_timeout
.tv_sec
= 9;
830 normal_timeout
.tv_usec
= 500000;
832 if (s
->ike_fd
!= s
->esp_fd
) {
833 /* send initial nat ike keepalive packet */
834 next_ike_keepalive
= time(NULL
) + 9;
839 select_timeout
= normal_timeout
;
845 struct timeval
*tvp
= NULL
;
846 FD_COPY(&rfds
, &refds
);
847 if (s
->ike
.do_dpd
|| enable_keepalives
)
848 tvp
= &select_timeout
;
849 presult
= select(nfds
, &refds
, NULL
, NULL
, tvp
);
850 if (presult
== 0 && (s
->ike
.do_dpd
|| enable_keepalives
)) {
851 /* reset to max timeout */
852 select_timeout
= normal_timeout
;
853 if (enable_keepalives
) {
854 if (s
->ike_fd
!= s
->esp_fd
) {
855 /* send nat ike keepalive packet */
856 next_ike_keepalive
= time(NULL
) + 9;
859 /* send nat keepalive packet */
860 if (send(s
->esp_fd
, keepalive
, keepalive_size
, 0) == -1) {
861 logmsg(LOG_ERR
, "keepalive sendto: %m");
865 time_t now
= time(NULL
);
866 if (s
->ike
.dpd_seqno
!= s
->ike
.dpd_seqno_ack
) {
867 /* Wake up more often for dpd attempts */
868 select_timeout
.tv_sec
= 5;
869 select_timeout
.tv_usec
= 0;
871 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
873 else if (now
>= next_ike_dpd
) {
875 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
879 DEBUG(2,printf("lifetime status: %ld of %u seconds used, %u|%u of %u kbytes used\n",
880 time(NULL
) - s
->ipsec
.life
.start
,
881 s
->ipsec
.life
.seconds
,
882 s
->ipsec
.life
.rx
/1024,
883 s
->ipsec
.life
.tx
/1024,
884 s
->ipsec
.life
.kbytes
));
885 } while ((presult
== 0 || (presult
== -1 && errno
== EINTR
)) && !do_kill
);
887 logmsg(LOG_ERR
, "select: %m");
891 #if !defined(__CYGWIN__)
892 if (FD_ISSET(s
->tun_fd
, &refds
)) {
897 if (FD_ISSET(s
->esp_fd
, &refds
) ) {
901 if (s
->ike_fd
!= s
->esp_fd
&& FD_ISSET(s
->ike_fd
, &refds
) ) {
902 DEBUG(3,printf("received something on ike fd..\n"));
903 len
= recv(s
->ike_fd
, global_buffer_tx
, MAX_HEADER
+ MAX_PACKET
, 0);
904 process_late_ike(s
, global_buffer_tx
, len
);
908 time_t now
= time(NULL
);
909 time_t next_up
= now
+ 86400;
910 if (enable_keepalives
) {
911 /* never wait more than 9 seconds for a UDP keepalive */
913 if (s
->ike_fd
!= s
->esp_fd
) {
914 if (now
>= next_ike_keepalive
) {
915 /* send nat ike keepalive packet now */
916 next_ike_keepalive
= now
+ 9;
918 select_timeout
= normal_timeout
;
920 if (next_ike_keepalive
< next_up
)
921 next_up
= next_ike_keepalive
;
925 if (s
->ike
.dpd_seqno
!= s
->ike
.dpd_seqno_ack
) {
927 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
928 if (now
+ 5 < next_up
)
931 else if (now
>= next_ike_dpd
) {
933 next_ike_dpd
= now
+ s
->ike
.dpd_idle
;
935 if (next_ike_dpd
< next_up
)
936 next_up
= next_ike_dpd
;
938 /* Reduce timeout so next activity happens on schedule */
939 select_timeout
.tv_sec
= next_up
- now
;
940 select_timeout
.tv_usec
= 0;
947 logmsg(LOG_NOTICE
, "connection terminated by dead peer detection");
950 logmsg(LOG_NOTICE
, "connection terminated by peer");
953 logmsg(LOG_NOTICE
, "terminated by signal: %d", do_kill
);
958 static void killit(int signum
)
963 static void write_pidfile(const char *pidfile
)
967 if (pidfile
== NULL
|| pidfile
[0] == '\0')
970 pf
= fopen(pidfile
, "w");
972 logmsg(LOG_WARNING
, "can't open pidfile %s for writing", pidfile
);
976 fprintf(pf
, "%d\n", (int)getpid());
980 void vpnc_doit(struct sa_block
*s
)
982 struct sigaction act
;
983 struct encap_method meth
;
985 const char *pidfile
= config
[CONFIG_PID_FILE
];
987 switch (s
->ipsec
.encap_mode
) {
988 case IPSEC_ENCAP_TUNNEL
:
989 encap_esp_new(&meth
);
990 gcry_create_nonce(&s
->ipsec
.ip_id
, sizeof(uint16_t));
992 case IPSEC_ENCAP_UDP_TUNNEL
:
993 case IPSEC_ENCAP_UDP_TUNNEL_OLD
:
994 encap_udp_new(&meth
);
1001 s
->ipsec
.rx
.key_cry
= s
->ipsec
.rx
.key
;
1002 hex_dump("rx.key_cry", s
->ipsec
.rx
.key_cry
, s
->ipsec
.key_len
, NULL
);
1004 s
->ipsec
.rx
.key_md
= s
->ipsec
.rx
.key
+ s
->ipsec
.key_len
;
1005 hex_dump("rx.key_md", s
->ipsec
.rx
.key_md
, s
->ipsec
.md_len
, NULL
);
1007 if (s
->ipsec
.cry_algo
) {
1008 gcry_cipher_open(&s
->ipsec
.rx
.cry_ctx
, s
->ipsec
.cry_algo
, GCRY_CIPHER_MODE_CBC
, 0);
1009 gcry_cipher_setkey(s
->ipsec
.rx
.cry_ctx
, s
->ipsec
.rx
.key_cry
, s
->ipsec
.key_len
);
1011 s
->ipsec
.rx
.cry_ctx
= NULL
;
1014 s
->ipsec
.tx
.key_cry
= s
->ipsec
.tx
.key
;
1015 hex_dump("tx.key_cry", s
->ipsec
.tx
.key_cry
, s
->ipsec
.key_len
, NULL
);
1017 s
->ipsec
.tx
.key_md
= s
->ipsec
.tx
.key
+ s
->ipsec
.key_len
;
1018 hex_dump("tx.key_md", s
->ipsec
.tx
.key_md
, s
->ipsec
.md_len
, NULL
);
1020 if (s
->ipsec
.cry_algo
) {
1021 gcry_cipher_open(&s
->ipsec
.tx
.cry_ctx
, s
->ipsec
.cry_algo
, GCRY_CIPHER_MODE_CBC
, 0);
1022 gcry_cipher_setkey(s
->ipsec
.tx
.cry_ctx
, s
->ipsec
.tx
.key_cry
, s
->ipsec
.key_len
);
1024 s
->ipsec
.tx
.cry_ctx
= NULL
;
1027 DEBUG(2, printf("remote -> local spi: %#08x\n", ntohl(s
->ipsec
.rx
.spi
)));
1028 DEBUG(2, printf("local -> remote spi: %#08x\n", ntohl(s
->ipsec
.tx
.spi
)));
1032 sigaction(SIGHUP
, NULL
, &act
);
1033 if (act
.sa_handler
== SIG_DFL
)
1034 signal(SIGHUP
, killit
);
1036 signal(SIGINT
, killit
);
1037 signal(SIGTERM
, killit
);
1043 if ((pid
= fork()) < 0) {
1044 fprintf(stderr
, "Warning, could not fork the child process!\n");
1045 } else if (pid
== 0) {
1046 close(0); open("/dev/null", O_RDONLY
, 0666);
1047 close(1); open("/dev/null", O_WRONLY
, 0666);
1048 close(2); open("/dev/null", O_WRONLY
, 0666);
1051 printf("VPNC started in background (pid: %d)...\n", (int)pid
);
1054 openlog("vpnc", LOG_PID
| LOG_PERROR
, LOG_DAEMON
);
1057 printf("VPNC started in foreground...\n");
1059 write_pidfile(pidfile
);
1064 unlink(pidfile
); /* ignore errors */