/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <net/6lowpan.h>
#include <net/ndisc.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>

#include "6lowpan_i.h"

#define LOWPAN_FRAG1_HEAD_SIZE	0x4
#define LOWPAN_FRAGN_HEAD_SIZE	0x5
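/* Per RFC 4944, a FRAG1 header carries the dispatch bits plus the 11-bit
 * datagram_size in its first two bytes and the 16-bit datagram_tag in the
 * next two (4 bytes total); a FRAGN header additionally carries a one-byte
 * datagram_offset expressed in units of 8 octets (5 bytes total).
 */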
struct lowpan_addr_info {
	struct ieee802154_addr daddr;
	struct ieee802154_addr saddr;
};

static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
	return (struct lowpan_addr_info *)(skb->data -
			sizeof(struct lowpan_addr_info));
}
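/* The resolved link-layer addresses are stashed in the skb headroom just in
 * front of skb->data by lowpan_header_create() and copied back out by
 * lowpan_header() before header compression rewrites the packet.
 */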
/* This callback will be called from AF_PACKET and the IPv6 stack; AF_PACKET
 * sockets give an 8 byte array for addresses only!
 *
 * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no
 * sense here. We should disable it, the right use-case would be AF_INET6
 * RAW/DGRAM sockets.
 */
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
	struct lowpan_802154_neigh *llneigh = NULL;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct neighbour *n;
	/* TODO:
	 * if this packet isn't an ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;
	/* intra-pan communication */
	info->saddr.pan_id = wpan_dev->pan_id;
	info->daddr.pan_id = info->saddr.pan_id;

	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		info->daddr.mode = IEEE802154_ADDR_SHORT;
	} else {
		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);

		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
		if (n) {
			llneigh = lowpan_802154_neigh(neighbour_priv(n));
			read_lock_bh(&n->lock);
			short_addr = llneigh->short_addr;
			read_unlock_bh(&n->lock);
		}

		if (llneigh &&
		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
			info->daddr.short_addr = short_addr;
			info->daddr.mode = IEEE802154_ADDR_SHORT;
		} else {
			info->daddr.mode = IEEE802154_ADDR_LONG;
			ieee802154_be64_to_le64(&info->daddr.extended_addr,
						daddr);
		}

		if (n)
			neigh_release(n);
	}

	if (!saddr) {
		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
			info->saddr.mode = IEEE802154_ADDR_SHORT;
			info->saddr.short_addr = wpan_dev->short_addr;
		} else {
			info->saddr.mode = IEEE802154_ADDR_LONG;
			info->saddr.extended_addr = wpan_dev->extended_addr;
		}
	} else {
		info->saddr.mode = IEEE802154_ADDR_LONG;
		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
	}

	return 0;
}
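/* Allocate one fragment skb. A FRAG1 fragment reuses the master frame's MAC
 * header verbatim; a FRAGN fragment rebuilds it via wpan_dev_hard_header().
 */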
static struct sk_buff *
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr, bool frag1)
{
	struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
			 GFP_ATOMIC);

	if (likely(frag)) {
		frag->dev = wdev;
		frag->priority = skb->priority;
		skb_reserve(frag, wdev->needed_headroom);
		skb_reset_network_header(frag);
		*mac_cb(frag) = *mac_cb(skb);

		if (frag1) {
			skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
		} else {
			rc = wpan_dev_hard_header(frag, wdev,
						  &master_hdr->dest,
						  &master_hdr->source, size);
			if (rc < 0) {
				kfree_skb(frag);
				return ERR_PTR(rc);
			}
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len, bool frag1)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	skb_put_data(frag, frag_hdr, frag_hdrlen);
	skb_put_data(frag, skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
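/* Fragment an already-compressed datagram: the FRAG1 fragment carries the
 * compressed 6LoWPAN header plus the first payload chunk, every following
 * FRAGN fragment carries further payload at an 8-octet-aligned offset of the
 * original (uncompressed) datagram.
 */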
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
	lowpan_802154_dev(ldev)->fragment_tag++;

	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb),
				  true);
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

		/* datagram_offset is carried in units of 8 octets */
		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len, false);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	ldev->stats.tx_packets++;
	ldev->stats.tx_bytes += dgram_size;
	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}
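/* Compress the IPv6 header in place and build the 802.15.4 MAC header from
 * the addresses stashed by lowpan_header_create(); *dgram_size and
 * *dgram_offset describe the uncompressed datagram for the fragmentation
 * path.
 */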
static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
			 u16 *dgram_size, u16 *dgram_offset)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;

	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	*dgram_size = skb->len;
	lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
	/* dgram_offset = (saved bytes after compression) + lowpan header len */
	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

	cb->type = IEEE802154_FC_TYPE_DATA;

	/* don't request an ack for broadcast destinations */
	if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
	    ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
		cb->ackreq = false;
	else
		cb->ackreq = wpan_dev->ackreq;

	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
				    &info.daddr, &info.saddr, 0);
}
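/* Transmit entry point for the 6LoWPAN interface: compress the header, then
 * either hand the frame to the wpan device directly if it fits into a single
 * 802.15.4 payload or fall back to RFC 4944 fragmentation.
 */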
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("package xmit\n");

	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		skb->dev = lowpan_802154_dev(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);

		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}