/*
 * Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 *
 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>

/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};

static LIST_HEAD(lowpan_devices);
/*
 * Uncompression of linklocal:
 *   0 -> 16 bytes from packet
 *   1 -> 2  bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 2  bytes from prefix - zeroes + 2 from packet
 *   3 -> 2  bytes from prefix - infer 8 bytes from lladdr
 *
 * NOTE: => the uncompress function does change 0xf to 0x10
 * NOTE: 0x00 => no-autoconfig => unspecified
 */
static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};

/*
 * Uncompression of ctx-based:
 *   0 -> 0 bits from packet [unspecified / reserved]
 *   1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 8 bytes from prefix - zeroes + 2 from packet
 *   3 -> 8 bytes from prefix - infer 8 bytes from lladdr
 */
static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
/*
 * Uncompression of ctx-based multicast:
 *   0 -> 0 bits from packet
 *   1 -> 2 bytes from prefix - bunch of zeroes and 5 from packet
 *   2 -> 2 bytes from prefix - zeroes + 3 from packet
 *   3 -> 2 bytes from prefix - infer 1 byte from lladdr
 */
static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
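
/*
 * How the tables above are consumed (illustrative note): each entry is
 * passed as the pref_post_count argument of lowpan_uncompress_addr()
 * further down; the high nibble is the number of prefix bytes to copy and
 * the low nibble the number of bytes taken from the packet, with 0xf
 * standing for a full 16-byte copy.  0x22, for instance, means "2 bytes
 * of prefix, 2 bytes from the packet, zero-fill the rest".
 */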
/* Link local prefix */
static const u8 lowpan_llprefix[] = {0xfe, 0x80};
/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
};

struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	spinlock_t		lock;		/* concurrency lock */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

static unsigned short fragment_tag;
static LIST_HEAD(lowpan_fragments);
spinlock_t flist_lock;
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline void lowpan_address_flip(u8 *src, u8 *dest)
{
	int i;

	for (i = 0; i < IEEE802154_ADDR_LEN; i++)
		(dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
}
/* list of all 6lowpan devices, used for packet delivery */
/* print data in line */
static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
					  unsigned char *buf, int len)
{
	pr_debug("(%s) %s: ", caller, msg);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
		       16, 1, buf, len, false);
}
/*
 * print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 */
static inline void lowpan_raw_dump_table(const char *caller, char *msg,
					 unsigned char *buf, int len)
{
	pr_debug("(%s) %s:\n", caller, msg);
	print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}
static u8
lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
			const unsigned char *lladdr)
{
	u8 val = 0;

	if (is_addr_mac_addr_based(ipaddr, lladdr))
		val = 3; /* 0-bits */
	else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
		/* compress IID to 16 bits xxxx::XXXX */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
		*hc06_ptr += 2;
		val = 2; /* 16-bits */
	} else {
		/* do not compress IID => xxxx::IID */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
		*hc06_ptr += 8;
		val = 1; /* 64-bits */
	}

	return rol8(val, shift);
}
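
/*
 * Note (derived from the call sites below): the 2-bit value computed here
 * (1 = 64 bits carried in-line, 2 = 16 bits, 3 = fully elided) is rotated
 * into the SAM or DAM field of the second IPHC byte by the caller, e.g.
 *
 *	iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
 *			LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
 */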
static void
lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
{
	memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
	/* second bit-flip (Universe/Local) is done according to RFC 2464 */
	ipaddr->s6_addr[8] ^= 0x02;
}
/*
 * Uncompress addresses based on a prefix and a postfix with zeroes in
 * between. If the postfix is zero in length it will use the link address
 * to configure the IP address (autoconf style).
 * pref_post_count takes a byte where the first nibble specifies the prefix
 * count and the second the postfix count (NOTE: 15/0xf => 16 bytes copy).
 */
static int
lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
	u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
{
	u8 prefcount = pref_post_count >> 4;
	u8 postcount = pref_post_count & 0x0f;

	/* full nibble 15 => 16 */
	prefcount = (prefcount == 15 ? 16 : prefcount);
	postcount = (postcount == 15 ? 16 : postcount);

	lowpan_raw_dump_inline(__func__, "linklocal address",
			       lladdr, IEEE802154_ALEN);

	memcpy(ipaddr, prefix, prefcount);

	if (prefcount + postcount < 16)
		memset(&ipaddr->s6_addr[prefcount], 0,
		       16 - (prefcount + postcount));

	if (postcount > 0) {
		memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
		skb_pull(skb, postcount);
	} else if (prefcount > 0) {
		/* no IID based configuration if no prefix and no data */
		lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
	}

	pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
		 postcount);
	lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);

	return 0;
}
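
/*
 * Worked example (illustrative): pref_post_count == 0x22 together with
 * lowpan_llprefix copies the two prefix bytes fe 80, zero-fills the twelve
 * bytes in between and takes the final two bytes from the packet, i.e.
 * fe80::XXXX; 0x20 instead takes nothing from the packet and derives the
 * low eight bytes from the link-layer address via
 * lowpan_uip_ds6_set_addr_iid().
 */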
static void
lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	pr_debug("(%s): UDP header compression\n", __func__);

	if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
	     LOWPAN_NHC_UDP_4BIT_PORT) &&
	    ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
	     LOWPAN_NHC_UDP_4BIT_PORT)) {
		pr_debug("(%s): both ports compression to 4 bits\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
		*(*hc06_ptr + 1) = /* subtraction is faster */
			(u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
			     ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
		*hc06_ptr += 2;
	} else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
			LOWPAN_NHC_UDP_8BIT_PORT) {
		pr_debug("(%s): remove 8 bits of dest\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
		memcpy(*hc06_ptr + 1, &uh->source, 2);
		*(*hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
		*hc06_ptr += 4;
	} else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
			LOWPAN_NHC_UDP_8BIT_PORT) {
		pr_debug("(%s): remove 8 bits of source\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
		memcpy(*hc06_ptr + 1, &uh->dest, 2);
		*(*hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
		*hc06_ptr += 4;
	} else {
		pr_debug("(%s): can't compress header\n", __func__);
		**hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
		memcpy(*hc06_ptr + 1, &uh->source, 2);
		memcpy(*hc06_ptr + 3, &uh->dest, 2);
		*hc06_ptr += 5;
	}

	/* checksum is always inline */
	memcpy(*hc06_ptr, &uh->check, 2);
	*hc06_ptr += 2;
}
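
/*
 * Illustration (not asserting exact constant values): when both ports fall
 * in the 4-bit compressible range, e.g. source 0xf0b1 and dest 0xf0b2, the
 * first branch above emits the LOWPAN_NHC_UDP_CS_P_11 byte plus a single
 * byte carrying the two port nibbles, so only that byte and the two
 * checksum bytes remain of the 8-byte UDP header.
 */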
static u8
lowpan_fetch_skb_u8(struct sk_buff *skb)
{
	u8 ret;

	BUG_ON(!pskb_may_pull(skb, 1));

	ret = skb->data[0];
	skb_pull(skb, 1);

	return ret;
}

static u16
lowpan_fetch_skb_u16(struct sk_buff *skb)
{
	u16 ret;

	BUG_ON(!pskb_may_pull(skb, 2));

	ret = skb->data[0] | (skb->data[1] << 8);
	skb_pull(skb, 2);

	return ret;
}
static int
lowpan_uncompress_udp_header(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u8 tmp;

	tmp = lowpan_fetch_skb_u8(skb);

	if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
		pr_debug("(%s): UDP header uncompression\n", __func__);
		switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
		case LOWPAN_NHC_UDP_CS_P_00:
			memcpy(&uh->source, &skb->data[0], 2);
			memcpy(&uh->dest, &skb->data[2], 2);
			skb_pull(skb, 4);
			break;
		case LOWPAN_NHC_UDP_CS_P_01:
			memcpy(&uh->source, &skb->data[0], 2);
			uh->dest =
				skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
			skb_pull(skb, 3);
			break;
		case LOWPAN_NHC_UDP_CS_P_10:
			uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
			memcpy(&uh->dest, &skb->data[1], 2);
			skb_pull(skb, 3);
			break;
		case LOWPAN_NHC_UDP_CS_P_11:
			uh->source =
				LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
			uh->dest =
				LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
			skb_pull(skb, 1);
			break;
		default:
			pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
			goto err;
		}

		pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
			 __func__, uh->source, uh->dest);

		/* checksum is carried inline */
		memcpy(&uh->check, &skb->data[0], 2);
		skb_pull(skb, 2);
	} else {
		pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
		goto err;
	}

	return 0;
err:
	return -EPERM;
}
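
/*
 * For illustration: an NHC byte of LOWPAN_NHC_UDP_CS_P_11 followed by the
 * data byte 0x12 is expanded by the CS_P_11 case above to source port
 * LOWPAN_NHC_UDP_4BIT_PORT + 1 and destination port
 * LOWPAN_NHC_UDP_4BIT_PORT + 2, after which only the two in-line checksum
 * bytes are left to copy.
 */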
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned len)
{
	u8 tmp, iphc0, iphc1, *hc06_ptr;
	struct ipv6hdr *hdr;
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	u8 *head;
	struct ieee802154_addr sa, da;

	if (type != ETH_P_IPV6)
		return 0;
	/*
	 * if this packet isn't an IPv6 one, where should it be routed?
	 */
	head = kzalloc(100, GFP_KERNEL);
	if (head == NULL)
		return -ENOMEM;

	hdr = ipv6_hdr(skb);
	hc06_ptr = head + 2;
	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
		 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
		 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
		 hdr->hop_limit);

	lowpan_raw_dump_table(__func__, "raw skb network header dump",
			      skb_network_header(skb), sizeof(struct ipv6hdr));

	saddr = dev->dev_addr;

	lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	/*
	 * As we copy some bit-length fields, in the IPHC encoding bytes,
	 * we sometimes use |=
	 * If the field is 0, and the current bit value in memory is 1,
	 * this does not work. We therefore reset the IPHC encoding here
	 */
	iphc0 = LOWPAN_DISPATCH_IPHC;
	iphc1 = 0;

	/* TODO: context lookup */

	lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
	/*
	 * Traffic class, flow label
	 * If flow label is 0, compress it. If traffic class is 0, compress it
	 * We have to process both at the same time as the offset of traffic
	 * class depends on the presence of version and flow label
	 */

	/* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
	tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
	tmp = ((tmp & 0x03) << 6) | (tmp >> 2);

	if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
	    (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
		/* flow label can be compressed */
		iphc0 |= LOWPAN_IPHC_FL_C;
		if ((hdr->priority == 0) &&
		    ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress (elide) all */
			iphc0 |= LOWPAN_IPHC_TC_C;
		} else {
			/* compress only the flow label */
			*hc06_ptr = tmp;
			hc06_ptr += 1;
		}
	} else {
		/* Flow label cannot be compressed */
		if ((hdr->priority == 0) &&
		    ((hdr->flow_lbl[0] & 0xF0) == 0)) {
			/* compress only traffic class */
			iphc0 |= LOWPAN_IPHC_TC_C;
			*hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
			memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
			hc06_ptr += 3;
		} else {
			/* compress nothing */
			memcpy(hc06_ptr, hdr, 4);
			/* replace the top byte with new ECN | DSCP format */
			*hc06_ptr = tmp;
			hc06_ptr += 4;
		}
	}
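
	/*
	 * Summary of the four branches above: TC_C | FL_C elides both fields,
	 * FL_C alone leaves a single ECN+DSCP byte in-line, TC_C alone keeps
	 * the 3-byte flow label (plus ECN bits) in-line, and with neither
	 * flag the full 4 bytes stay, with the top byte rewritten into the
	 * ECN | DSCP order used by hc06.
	 */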
	/* NOTE: payload length is always compressed */

	/* Next Header is compressed if UDP */
	if (hdr->nexthdr == UIP_PROTO_UDP)
		iphc0 |= LOWPAN_IPHC_NH_C;

	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		*hc06_ptr = hdr->nexthdr;
		hc06_ptr += 1;
	}

	/*
	 * Hop limit
	 * if 1:   compress, encoding is 01
	 * if 64:  compress, encoding is 10
	 * if 255: compress, encoding is 11
	 * else do not compress
	 */
	switch (hdr->hop_limit) {
	case 1:
		iphc0 |= LOWPAN_IPHC_TTL_1;
		break;
	case 64:
		iphc0 |= LOWPAN_IPHC_TTL_64;
		break;
	case 255:
		iphc0 |= LOWPAN_IPHC_TTL_255;
		break;
	default:
		*hc06_ptr = hdr->hop_limit;
		hc06_ptr += 1;
		break;
	}
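
	/*
	 * e.g. a hop limit of 64 sets LOWPAN_IPHC_TTL_64 and elides the byte
	 * entirely, while a value such as 17 falls through to the default
	 * case and is carried in-line.
	 */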
	/* source address compression */
	if (is_addr_unspecified(&hdr->saddr)) {
		pr_debug("(%s): source address is unspecified, setting SAC\n",
			 __func__);
		iphc1 |= LOWPAN_IPHC_SAC;
	/* TODO: context lookup */
	} else if (is_addr_link_local(&hdr->saddr)) {
		pr_debug("(%s): source address is link-local\n", __func__);
		iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
				LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
	} else {
		pr_debug("(%s): send the full source address\n", __func__);
		memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
		hc06_ptr += 16;
	}
	/* destination address compression */
	if (is_addr_mcast(&hdr->daddr)) {
		pr_debug("(%s): destination address is multicast", __func__);
		iphc1 |= LOWPAN_IPHC_M;
		if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
			pr_debug("compressed to 1 octet\n");
			iphc1 |= LOWPAN_IPHC_DAM_11;
			*hc06_ptr = hdr->daddr.s6_addr[15];
			hc06_ptr += 1;
		} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
			pr_debug("compressed to 4 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_10;
			/* second byte + the last three */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
			hc06_ptr += 4;
		} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
			pr_debug("compressed to 6 octets\n");
			iphc1 |= LOWPAN_IPHC_DAM_01;
			/* second byte + the last five */
			*hc06_ptr = hdr->daddr.s6_addr[1];
			memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
			hc06_ptr += 6;
		} else {
			pr_debug("using full address\n");
			iphc1 |= LOWPAN_IPHC_DAM_00;
			memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
			hc06_ptr += 16;
		}
	} else {
		pr_debug("(%s): destination address is unicast: ", __func__);
		/* TODO: context lookup */
		if (is_addr_link_local(&hdr->daddr)) {
			pr_debug("destination address is link-local\n");
			iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
					LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
		} else {
			pr_debug("using full address\n");
			memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
			hc06_ptr += 16;
		}
	}
	/* UDP header compression */
	if (hdr->nexthdr == UIP_PROTO_UDP)
		lowpan_compress_udp_header(&hc06_ptr, skb);

	head[0] = iphc0;
	head[1] = iphc1;

	skb_pull(skb, sizeof(struct ipv6hdr));
	memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);

	kfree(head);

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
			      skb->len);
	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */

	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = 0xff;

	da.addr_type = IEEE802154_ADDR_LONG;
	da.pan_id = 0xff;

	memcpy(&(da.hwaddr), daddr, 8);
	memcpy(&(sa.hwaddr), saddr, 8);

	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			       type, (void *)&da, (void *)&sa, skb->len);
}
static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
{
	struct sk_buff *new;
	struct lowpan_dev_record *entry;
	int stat = NET_RX_SUCCESS;

	new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
			      GFP_ATOMIC);
	kfree_skb(skb);

	if (!new)
		return -ENOMEM;

	skb_push(new, sizeof(struct ipv6hdr));
	skb_reset_network_header(new);
	skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));

	new->protocol = htons(ETH_P_IPV6);
	new->pkt_type = PACKET_HOST;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
			skb = skb_copy(new, GFP_ATOMIC);
			if (!skb) {
				stat = -ENOMEM;
				break;
			}

			skb->dev = entry->ldev;
			stat = netif_rx(skb);
		}
	rcu_read_unlock();

	kfree_skb(new);

	return stat;
}
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("%s: timer expired for frame with tag %d\n", __func__,
		 entry->tag);

	spin_lock(&flist_lock);
	list_del(&entry->list);
	spin_unlock(&flist_lock);

	dev_kfree_skb(entry->skb);
	kfree(entry);
}
static int
lowpan_process_data(struct sk_buff *skb)
{
	struct ipv6hdr hdr;
	u8 tmp, iphc0, iphc1, num_context = 0;
	u8 *_saddr, *_daddr;
	int err;

	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
			      skb->len);
	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;
	iphc0 = lowpan_fetch_skb_u8(skb);
	/* fragments assembling */
	switch (iphc0 & LOWPAN_DISPATCH_MASK) {
	case LOWPAN_DISPATCH_FRAG1:
	case LOWPAN_DISPATCH_FRAGN:
	{
		struct lowpan_fragment *frame;
		u8 len, offset;
		u16 tag;
		bool found = false;

		len = lowpan_fetch_skb_u8(skb); /* frame length */
		tag = lowpan_fetch_skb_u16(skb);

		/*
		 * check if frame assembling with the same tag is
		 * already in progress
		 */
		spin_lock(&flist_lock);

		list_for_each_entry(frame, &lowpan_fragments, list)
			if (frame->tag == tag) {
				found = true;
				break;
			}

		/* alloc new frame structure */
		if (found == false) {
			frame = kzalloc(sizeof(struct lowpan_fragment),
					GFP_ATOMIC);
			if (!frame)
				goto unlock_and_drop;

			INIT_LIST_HEAD(&frame->list);

			frame->length = (iphc0 & 7) | (len << 3);
			frame->tag = tag;
			/* allocate buffer for frame assembling */
			frame->skb = alloc_skb(frame->length +
					sizeof(struct ipv6hdr), GFP_ATOMIC);
			if (!frame->skb) {
				kfree(frame);
				goto unlock_and_drop;
			}

			frame->skb->priority = skb->priority;
			frame->skb->dev = skb->dev;

			/* reserve headroom for uncompressed ipv6 header */
			skb_reserve(frame->skb, sizeof(struct ipv6hdr));
			skb_put(frame->skb, frame->length);

			init_timer(&frame->timer);
			/* time out is the same as for ipv6 - 60 sec */
			frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
			frame->timer.data = (unsigned long)frame;
			frame->timer.function = lowpan_fragment_timer_expired;

			add_timer(&frame->timer);

			list_add_tail(&frame->list, &lowpan_fragments);
		}
		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
			goto unlock_and_drop;

		offset = lowpan_fetch_skb_u8(skb); /* fetch offset */

		/* if payload fits buffer, copy it */
		if (likely((offset * 8 + skb->len) <= frame->length))
			skb_copy_to_linear_data_offset(frame->skb, offset * 8,
						       skb->data, skb->len);
		else
			goto unlock_and_drop;

		frame->bytes_rcv += skb->len;
		/* frame assembling complete */
		if ((frame->bytes_rcv == frame->length) &&
		    frame->timer.expires > jiffies) {
			/* if the timer hasn't expired, delete it first */
			del_timer(&frame->timer);
			list_del(&frame->list);
			spin_unlock(&flist_lock);

			dev_kfree_skb(skb);
			skb = frame->skb;
			kfree(frame);

			iphc0 = lowpan_fetch_skb_u8(skb);
			break;
		}
		spin_unlock(&flist_lock);

		return kfree_skb(skb), 0;
	}
	default:
		break;
	}
	iphc1 = lowpan_fetch_skb_u8(skb);

	_saddr = mac_cb(skb)->sa.hwaddr;
	_daddr = mac_cb(skb)->da.hwaddr;

	pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);

	/* read another byte if the CID flag is set */
	if (iphc1 & LOWPAN_IPHC_CID) {
		pr_debug("(%s): CID flag is set, increase header with one\n",
			 __func__);
		num_context = lowpan_fetch_skb_u8(skb);
	}
	/* Traffic Class and Flow Label */
	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
	/*
	 * Traffic Class and Flow Label carried in-line
	 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
	 */
	case 0:
		tmp = lowpan_fetch_skb_u8(skb);
		memcpy(&hdr.flow_lbl, &skb->data[0], 3);
		skb_pull(skb, 3);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
					(hdr.flow_lbl[0] & 0x0f);
		break;
	/*
	 * Traffic class carried in-line
	 * ECN + DSCP (1 byte), Flow Label is elided
	 */
	case 1:
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.priority = ((tmp >> 2) & 0x0f);
		hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	/*
	 * Flow Label carried in-line
	 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
	 */
	case 2:
		tmp = lowpan_fetch_skb_u8(skb);
		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
		skb_pull(skb, 2);
		break;
	/* Traffic Class and Flow Label are elided */
	case 3:
		hdr.priority = 0;
		hdr.flow_lbl[0] = 0;
		hdr.flow_lbl[1] = 0;
		hdr.flow_lbl[2] = 0;
		break;
	default:
		break;
	}
	/* Next Header */
	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
		/* Next header is carried inline */
		hdr.nexthdr = lowpan_fetch_skb_u8(skb);
		pr_debug("(%s): NH flag is set, next header is carried "
			 "inline: %02x\n", __func__, hdr.nexthdr);
	}

	/* Hop Limit */
	if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
		hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
	else
		hdr.hop_limit = lowpan_fetch_skb_u8(skb);
	/* Extract SAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;

	/* Source address uncompression */
	pr_debug("(%s): source address stateless compression\n", __func__);
	err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
				     lowpan_unc_llconf[tmp], skb->data);
	if (err)
		goto drop;
	/* Extract DAM to the tmp variable */
	tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;

	/* check for Multicast Compression */
	if (iphc1 & LOWPAN_IPHC_M) {
		if (iphc1 & LOWPAN_IPHC_DAC) {
			pr_debug("(%s): destination address context-based "
				 "multicast compression\n", __func__);
			/* TODO: implement this */
		} else {
			u8 prefix[] = {0xff, 0x02};

			pr_debug("(%s): destination address non-context-based"
				 " multicast compression\n", __func__);
			if (0 < tmp && tmp < 3) {
				prefix[1] = lowpan_fetch_skb_u8(skb);
			}

			err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
						     lowpan_unc_mxconf[tmp], NULL);
			if (err)
				goto drop;
		}
	} else {
		pr_debug("(%s): destination address stateless compression\n",
			 __func__);
		err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
					     lowpan_unc_llconf[tmp], skb->data);
		if (err)
			goto drop;
	}
	/* UDP data uncompression */
	if (iphc0 & LOWPAN_IPHC_NH_C)
		if (lowpan_uncompress_udp_header(skb))
			goto drop;

	/* Not a fragmented packet */
	hdr.payload_len = htons(skb->len);

	pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
		 skb_headroom(skb), skb->len);

	pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
		 "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
		 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);

	lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
			      sizeof(hdr));

	return lowpan_skb_deliver(skb, &hdr);
unlock_and_drop:
	spin_unlock(&flist_lock);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static int lowpan_set_address(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (netif_running(dev))
		return -EBUSY;

	/* TODO: validate addr */
	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	return 0;
}
static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
	/*
	 * Currently long addressing mode is supported only, so the overall
	 * header size is:
	 *	FC SeqNum DPAN DA  SA  Sec
	 *	2  +  1  +  2  + 8 + 8 + 0  = 21
	 */
	return 21;
}
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
		     int mlen, int plen, int offset)
{
	struct sk_buff *frag;
	int hlen, ret;

	/* if payload length is zero, it's the first fragment */
	hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);

	lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;
	frag->dev = skb->dev;

	/* copy header, MFR and payload */
	memcpy(skb_put(frag, mlen), skb->data, mlen);
	memcpy(skb_put(frag, hlen), head, hlen);

	if (plen)
		skb_copy_from_linear_data_offset(skb, offset + mlen,
						 skb_put(frag, plen), plen);

	lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
			      frag->len);

	ret = dev_queue_xmit(frag);

	return ret;
}
static int
lowpan_skb_fragmentation(struct sk_buff *skb)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = lowpan_get_mac_header_length(skb);
	payload_length = skb->len - header_length;
	tag = fragment_tag++;

	/* first fragment header */
	head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
	head[1] = (payload_length >> 3) & 0xff;
	head[2] = tag & 0xff;
	head[3] = tag >> 8;

	err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);

	/* next fragment header */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while ((payload_length - offset > 0) && (err >= 0)) {
		int len = LOWPAN_FRAG_SIZE;

		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset);
		offset += len;
	}

	return err;
}
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("(%s): package xmit\n", __func__);

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
		goto error;
	}

	if (skb->len <= IEEE802154_MTU) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("(%s): frame is too big, fragmentation is needed\n",
		 __func__);
	err = lowpan_skb_fragmentation(skb);
error:
	dev_kfree_skb(skb);
out:
	if (err < 0)
		pr_debug("(%s): ERROR: xmit failed\n", __func__);

	return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
static void lowpan_dev_free(struct net_device *dev)
{
	dev_put(lowpan_dev_info(dev)->real_dev);
	free_netdev(dev);
}
static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};
static void lowpan_setup(struct net_device *dev)
{
	pr_debug("(%s)\n", __func__);

	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->destructor		= lowpan_dev_free;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	pr_debug("(%s)\n", __func__);

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	switch (skb->data[0] & 0xe0) {
	case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
	case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
	case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
		lowpan_process_data(skb);
		break;
	default:
		break;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *real_dev;
	struct lowpan_dev_record *entry;

	pr_debug("(%s)\n", __func__);

	if (!tb[IFLA_LINK])
		return -EINVAL;

	/* find and hold real wpan device */
	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	lowpan_dev_info(dev)->real_dev = real_dev;
	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

	entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
	if (!entry) {
		dev_put(real_dev);
		lowpan_dev_info(dev)->real_dev = NULL;
		return -ENOMEM;
	}

	entry->ldev = dev;

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	INIT_LIST_HEAD(&entry->list);
	list_add_tail(&entry->list, &lowpan_devices);
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	register_netdevice(dev);

	return 0;
}
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry;
	struct lowpan_dev_record *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void __exit lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
static int __init lowpan_init_module(void)
{
	int err = 0;

	pr_debug("(%s)\n", __func__);

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);
out:
	return err;
}
static void __exit lowpan_cleanup_module(void)
{
	pr_debug("(%s)\n", __func__);

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);
}
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
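
/*
 * Usage sketch (assumed, not part of this file): once the module is loaded,
 * a 6lowpan interface is stacked on top of an IEEE 802.15.4 WPAN device
 * through the rtnl_link_ops registered above, along the lines of
 *
 *	ip link add link wpan0 name lowpan0 type lowpan
 *	ip link set lowpan0 up
 *
 * where "wpan0" is the underlying device supplied via IFLA_LINK.
 */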