/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "../ieee802154/6lowpan.h" /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
        struct in6_addr addr;
        struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
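
/* Per-skb control-block state: header_create() records the destination
 * IPv6 address and the matching L2CAP connection here on the TX path,
 * and bt_xmit() uses the stored address to look up the peer to send to.
 */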

/* The devices list contains the devices for which we are acting as a
 * proxy. The BT 6LoWPAN device is a virtual device that connects to the
 * Bluetooth LE device. The real connection to the BT device is done via
 * the l2cap layer. There exists one virtual device per BT 6LoWPAN
 * network (=hciX device). The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);
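
/* devices_lock protects both bt_6lowpan_devices and the per-device peer
 * lists hanging off it: lookups take it for reading with interrupts
 * saved, list updates take it for writing.
 */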

struct lowpan_peer {
        struct list_head list;
        struct l2cap_conn *conn;

        /* peer addresses in various formats */
        unsigned char eui64_addr[EUI64_ADDR_LEN];
        struct in6_addr peer_addr;
};

struct lowpan_dev {
        struct list_head list;

        struct hci_dev *hdev;
        struct net_device *netdev;
        struct list_head peers;
        atomic_t peer_count; /* number of items in peers list */

        struct work_struct delete_netdev;
        struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
        return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
        list_add(&peer->list, &dev->peers);
        atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
        list_del(&peer->list);

        /* Tell the caller when the last peer of this device went away. */
        if (atomic_dec_and_test(&dev->peer_count)) {
                BT_DBG("last peer");
                return true;
        }

        return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
                                                 bdaddr_t *ba, __u8 type)
{
        struct lowpan_peer *peer, *tmp;

        BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
               ba, type);

        list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
                BT_DBG("addr %pMR type %d",
                       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

                /* A peer matches only if both address and type agree. */
                if (bacmp(&peer->conn->hcon->dst, ba))
                        continue;

                if (type == peer->conn->hcon->dst_type)
                        return peer;
        }

        return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
                                                   struct l2cap_conn *conn)
{
        struct lowpan_peer *peer, *tmp;

        list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
                if (peer->conn == conn)
                        return peer;
        }

        return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
        struct lowpan_dev *entry, *tmp;
        struct lowpan_peer *peer = NULL;
        unsigned long flags;

        read_lock_irqsave(&devices_lock, flags);

        list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
                peer = peer_lookup_conn(entry, conn);
                if (peer)
                        break;
        }

        read_unlock_irqrestore(&devices_lock, flags);

        return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
        struct lowpan_dev *entry, *tmp;
        struct lowpan_dev *dev = NULL;
        unsigned long flags;

        read_lock_irqsave(&devices_lock, flags);

        list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
                if (conn->hcon->hdev == entry->hdev) {
                        dev = entry;
                        break;
                }
        }

        read_unlock_irqrestore(&devices_lock, flags);

        return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *skb_cp;
        int ret;

        /* Hand a private copy to the network stack; the caller keeps
         * ownership of the original skb.
         */
        skb_cp = skb_copy(skb, GFP_ATOMIC);
        if (!skb_cp)
                return -ENOMEM;

        ret = netif_rx(skb_cp);

        BT_DBG("receive skb %d", ret);
        if (ret < 0)
                return NET_RX_DROP;

        return ret;
}

static int process_data(struct sk_buff *skb, struct net_device *netdev,
                        struct l2cap_conn *conn)
{
        const u8 *saddr, *daddr;
        u8 iphc0, iphc1;
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        unsigned long flags;

        dev = lowpan_dev(netdev);

        read_lock_irqsave(&devices_lock, flags);
        peer = peer_lookup_conn(dev, conn);
        read_unlock_irqrestore(&devices_lock, flags);
        if (!peer)
                goto drop;

        saddr = peer->eui64_addr;
        daddr = dev->netdev->dev_addr;

        /* at least two bytes will be used for the encoding */
        if (skb->len < 2)
                goto drop;

        if (lowpan_fetch_skb_u8(skb, &iphc0))
                goto drop;

        if (lowpan_fetch_skb_u8(skb, &iphc1))
                goto drop;

        return lowpan_process_data(skb, netdev,
                                   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
                                   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
                                   iphc0, iphc1, give_skb_to_upper);

drop:
        kfree_skb(skb);
        return -EINVAL;
}
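
/* recv_pkt() below handles the two 6LoWPAN dispatch types that can
 * arrive here: an uncompressed IPv6 packet (dispatch byte
 * LOWPAN_DISPATCH_IPV6, 0x41) is passed up unchanged, while an
 * IPHC-compressed datagram (dispatch pattern 011xxxxx, hence the 0xe0
 * mask) is decompressed via process_data() first.
 */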

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                    struct l2cap_conn *conn)
{
        struct sk_buff *local_skb;
        int ret;

        if (!netif_running(dev))
                goto drop;

        if (dev->type != ARPHRD_6LOWPAN)
                goto drop;

        /* check that it's our buffer */
        if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
                local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
                                            skb_tailroom(skb), GFP_ATOMIC);
                if (!local_skb)
                        goto drop;

                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;

                skb_reset_network_header(local_skb);
                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

                if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
                        kfree_skb(local_skb);
                        goto drop;
                }

                dev->stats.rx_bytes += skb->len;
                dev->stats.rx_packets++;

                kfree_skb(local_skb);
        } else {
                switch (skb->data[0] & 0xe0) {
                case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
                        local_skb = skb_clone(skb, GFP_ATOMIC);
                        if (!local_skb)
                                goto drop;

                        ret = process_data(local_skb, dev, conn);
                        if (ret != NET_RX_SUCCESS)
                                goto drop;

                        dev->stats.rx_bytes += skb->len;
                        dev->stats.rx_packets++;
                        break;
                default:
                        break;
                }
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        int err;

        peer = lookup_peer(conn);
        if (!peer)
                return -ENOENT;

        dev = lookup_dev(conn);
        if (!dev || !dev->netdev)
                return -ENOENT;

        err = recv_pkt(skb, dev->netdev, conn);
        BT_DBG("recv pkt %d", err);

        return err;
}
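
/* TX side: create_pdu() allocates the first skb with room for the L2CAP
 * header, and skbuff_copy() fills it with as much payload as the
 * connection MTU allows. Any remainder is chained as continuation
 * fragments (no L2CAP header) on the skb's frag_list; hci_send_acl()
 * later transmits the whole chain as one ACL start + continuation
 * sequence.
 */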

static inline int skbuff_copy(void *msg, int len, int count, int mtu,
                              struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff **frag;
        int sent = 0;

        memcpy(skb_put(skb, count), msg, count);

        sent += count;
        msg  += count;
        len  -= count;

        dev->stats.tx_bytes += count;
        dev->stats.tx_packets++;

        raw_dump_table(__func__, "Sending", skb->data, skb->len);

        /* Continuation fragments (no L2CAP header) */
        frag = &skb_shinfo(skb)->frag_list;
        while (len > 0) {
                struct sk_buff *tmp;

                count = min_t(unsigned int, mtu, len);

                tmp = bt_skb_alloc(count, GFP_ATOMIC);
                if (!tmp)
                        return -ENOMEM;

                *frag = tmp;

                memcpy(skb_put(*frag, count), msg, count);

                raw_dump_table(__func__, "Sending fragment",
                               (*frag)->data, count);

                (*frag)->priority = skb->priority;

                sent += count;
                msg  += count;
                len  -= count;

                skb->len += (*frag)->len;
                skb->data_len += (*frag)->len;

                frag = &(*frag)->next;

                dev->stats.tx_bytes += count;
                dev->stats.tx_packets++;
        }

        return sent;
}

static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
                                  size_t len, u32 priority,
                                  struct net_device *dev)
{
        struct sk_buff *skb;
        struct l2cap_hdr *lh;
        int err, count;

        /* FIXME: This mtu check should not be needed and atm is only
         * used for testing purposes
         */
        if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
                conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

        count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

        BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

        skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        skb->priority = priority;

        lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
        lh->len = cpu_to_le16(len);

        err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
        if (unlikely(err < 0)) {
                kfree_skb(skb);
                BT_DBG("skbuff copy %d failed", err);
                return ERR_PTR(err);
        }

        return skb;
}

static int conn_send(struct l2cap_conn *conn,
                     void *msg, size_t len, u32 priority,
                     struct net_device *dev)
{
        struct sk_buff *skb;

        skb = create_pdu(conn, msg, len, priority, dev);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
               skb->priority);

        hci_send_acl(conn->hchan, skb, ACL_START);

        return 0;
}
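
/* get_dest_bdaddr() maps a peer's link-local IPv6 address back to its
 * Bluetooth device address; it is the inverse of set_addr() further
 * below. The low bit of the first interface identifier byte encodes the
 * LE address type in this scheme: 1 means a public address, 0 a random
 * one.
 */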

static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
                            bdaddr_t *addr, u8 *addr_type)
{
        u8 *eui64;

        eui64 = ip6_daddr->s6_addr + 8;

        /* Undo the byte reordering done by set_addr(): bdaddr_t stores
         * the BT address little-endian, while the EUI-64 identifier
         * carries it big-endian around the ff:fe marker bytes.
         */
        addr->b[0] = eui64[7];
        addr->b[1] = eui64[6];
        addr->b[2] = eui64[5];
        addr->b[3] = eui64[2];
        addr->b[4] = eui64[1];
        addr->b[5] = eui64[0];

        addr->b[5] ^= 2;

        /* Set universal/local bit to 0 */
        if (addr->b[5] & 1) {
                addr->b[5] &= ~1;
                *addr_type = ADDR_LE_DEV_PUBLIC;
        } else {
                *addr_type = ADDR_LE_DEV_RANDOM;
        }
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
                         unsigned short type, const void *_daddr,
                         const void *_saddr, unsigned int len)
{
        struct ipv6hdr *hdr;
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        bdaddr_t addr, *any = BDADDR_ANY;
        u8 *saddr, *daddr = any->b;
        u8 addr_type;

        if (type != ETH_P_IPV6)
                return -EINVAL;

        hdr = ipv6_hdr(skb);

        dev = lowpan_dev(netdev);

        if (ipv6_addr_is_multicast(&hdr->daddr)) {
                memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
                       sizeof(struct in6_addr));
                lowpan_cb(skb)->conn = NULL;
        } else {
                unsigned long flags;

                /* Get destination BT device from skb.
                 * If there is no such peer then discard the packet.
                 */
                get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

                BT_DBG("dest addr %pMR type %d", &addr, addr_type);

                read_lock_irqsave(&devices_lock, flags);
                peer = peer_lookup_ba(dev, &addr, addr_type);
                read_unlock_irqrestore(&devices_lock, flags);

                if (!peer) {
                        BT_DBG("no such peer %pMR found", &addr);
                        return -ENOENT;
                }

                daddr = peer->eui64_addr;

                memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
                       sizeof(struct in6_addr));
                lowpan_cb(skb)->conn = peer->conn;
        }

        saddr = dev->netdev->dev_addr;

        return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
                    const void *daddr, struct sk_buff *skb,
                    struct net_device *netdev)
{
        raw_dump_table(__func__, "raw skb data dump before fragmentation",
                       skb->data, skb->len);

        return conn_send(conn, skb->data, skb->len, 0, netdev);
}

static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
        struct sk_buff *local_skb;
        struct lowpan_dev *entry, *tmp;
        unsigned long flags;

        read_lock_irqsave(&devices_lock, flags);

        list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
                struct lowpan_peer *pentry, *ptmp;
                struct lowpan_dev *dev;

                if (entry->netdev != netdev)
                        continue;

                dev = lowpan_dev(entry->netdev);

                list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
                        local_skb = skb_clone(skb, GFP_ATOMIC);

                        send_pkt(pentry->conn, netdev->dev_addr,
                                 pentry->eui64_addr, local_skb, netdev);

                        kfree_skb(local_skb);
                }
        }

        read_unlock_irqrestore(&devices_lock, flags);
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        int err = 0;
        unsigned char *eui64_addr;
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        bdaddr_t addr;
        u8 addr_type;

        if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
                /* We need to send the packet to every device
                 * behind this interface.
                 */
                send_mcast_pkt(skb, netdev);
        } else {
                unsigned long flags;

                get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
                eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
                dev = lowpan_dev(netdev);

                read_lock_irqsave(&devices_lock, flags);
                peer = peer_lookup_ba(dev, &addr, addr_type);
                read_unlock_irqrestore(&devices_lock, flags);

                BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
                       &addr, &lowpan_cb(skb)->addr, peer);

                if (peer && peer->conn)
                        err = send_pkt(peer->conn, netdev->dev_addr,
                                       eui64_addr, skb, netdev);
        }
        dev_kfree_skb(skb);

        if (err)
                BT_DBG("ERROR: xmit failed (%d)", err);

        return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
        .ndo_start_xmit         = bt_xmit,
};

static struct header_ops header_ops = {
        .create = header_create,
};
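
/* netdev_setup() configures the virtual interface: a point-to-point
 * link, an 8-byte EUI-64 hardware address, and the IPv6 minimum MTU of
 * 1280 octets (IPV6_MIN_MTU); payloads that exceed the L2CAP connection
 * MTU are split into ACL fragments by skbuff_copy().
 */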

static void netdev_setup(struct net_device *dev)
{
        dev->addr_len           = EUI64_ADDR_LEN;
        dev->type               = ARPHRD_6LOWPAN;

        dev->hard_header_len    = 0;
        dev->needed_tailroom    = 0;
        dev->mtu                = IPV6_MIN_MTU;
        dev->tx_queue_len       = 0;
        dev->flags              = IFF_RUNNING | IFF_POINTOPOINT;
        dev->watchdog_timeo     = 0;

        dev->netdev_ops         = &netdev_ops;
        dev->header_ops         = &header_ops;
        dev->destructor         = free_netdev;
}

static struct device_type bt_type = {
        .name   = "bluetooth",
};
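
/* set_addr() builds the EUI-64 interface identifier from a BT device
 * address: the 48-bit address is laid out big-endian around the ff:fe
 * marker bytes, bit 1 of the first byte is flipped, and bit 0 encodes
 * the LE address type (1 = public, 0 = random). For example, the public
 * address 12:34:56:78:9a:bc becomes the identifier
 * 11:34:56:ff:fe:78:9a:bc.
 */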

static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
        /* addr is the BT address in little-endian format */
        eui[0] = addr[5];
        eui[1] = addr[4];
        eui[2] = addr[3];
        eui[3] = 0xFF;
        eui[4] = 0xFE;
        eui[5] = addr[2];
        eui[6] = addr[1];
        eui[7] = addr[0];

        eui[0] ^= 2;

        /* Universal/local bit set, RFC 4291 */
        if (addr_type == ADDR_LE_DEV_PUBLIC)
                eui[0] |= 1;
        else
                eui[0] &= ~1;
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
                         u8 addr_type)
{
        netdev->addr_assign_type = NET_ADDR_PERM;
        set_addr(netdev->dev_addr, addr->b, addr_type);
        netdev->dev_addr[0] ^= 2;
}

static void ifup(struct net_device *netdev)
{
        int err;

        rtnl_lock();
        err = dev_open(netdev);
        if (err < 0)
                BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
        rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
        struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
                                              notify_peers.work);

        netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
        if (hcon->type != LE_LINK)
                return false;

        return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}
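
/* add_peer_conn() builds the peer's link-local IPv6 address the way
 * stateless autoconfiguration would: the fe80::/64 prefix plus an
 * EUI-64-based interface identifier (RFC 2464 style). The copy kept in
 * peer->eui64_addr gets a second universal/local bit flip, as the
 * inline comment below notes.
 */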

static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
        struct lowpan_peer *peer;
        unsigned long flags;

        peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
        if (!peer)
                return -ENOMEM;

        peer->conn = conn;
        memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

        /* fe80::/64 link-local prefix */
        peer->peer_addr.s6_addr[0] = 0xFE;
        peer->peer_addr.s6_addr[1] = 0x80;
        set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
                 conn->hcon->dst_type);

        memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
               EUI64_ADDR_LEN);
        peer->eui64_addr[0] ^= 2; /* second bit-flip (Universal/Local)
                                   * is done according to RFC 2464
                                   */

        raw_dump_inline(__func__, "peer IPv6 address",
                        (unsigned char *)&peer->peer_addr, 16);
        raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);

        write_lock_irqsave(&devices_lock, flags);
        INIT_LIST_HEAD(&peer->list);
        peer_add(dev, peer);
        write_unlock_irqrestore(&devices_lock, flags);

        /* Notifying peers about us needs to be done without locks held */
        INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
        schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

        return 0;
}

/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE device
 * and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
        struct lowpan_peer *peer = NULL;
        struct lowpan_dev *dev;
        struct net_device *netdev;
        int err = 0;
        unsigned long flags;

        if (!is_bt_6lowpan(conn->hcon))
                return 0;

        peer = lookup_peer(conn);
        if (peer)
                return -EEXIST;

        dev = lookup_dev(conn);
        if (dev)
                return add_peer_conn(conn, dev);

        netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
        if (!netdev)
                return -ENOMEM;

        set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

        netdev->netdev_ops = &netdev_ops;
        SET_NETDEV_DEV(netdev, &conn->hcon->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);

        err = register_netdev(netdev);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
                free_netdev(netdev);
                goto out;
        }

        BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
               netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);

        dev = netdev_priv(netdev);
        dev->netdev = netdev;
        dev->hdev = conn->hcon->hdev;
        INIT_LIST_HEAD(&dev->peers);

        write_lock_irqsave(&devices_lock, flags);
        INIT_LIST_HEAD(&dev->list);
        list_add(&dev->list, &bt_6lowpan_devices);
        write_unlock_irqrestore(&devices_lock, flags);

        ifup(netdev);

        return add_peer_conn(conn, dev);

out:
        return err;
}

static void delete_netdev(struct work_struct *work)
{
        struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
                                                delete_netdev);

        unregister_netdev(entry->netdev);

        /* The entry pointer is deleted in device_event() */
}

int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
        struct lowpan_dev *entry, *tmp;
        struct lowpan_dev *dev = NULL;
        struct lowpan_peer *peer;
        int err = -ENOENT;
        unsigned long flags;
        bool last = false;

        if (!conn || !is_bt_6lowpan(conn->hcon))
                return 0;

        write_lock_irqsave(&devices_lock, flags);

        list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
                dev = lowpan_dev(entry->netdev);
                peer = peer_lookup_conn(dev, conn);
                if (peer) {
                        last = peer_del(dev, peer);
                        err = 0;
                        break;
                }
        }

        if (!err && last && dev && !atomic_read(&dev->peer_count)) {
                write_unlock_irqrestore(&devices_lock, flags);

                cancel_delayed_work_sync(&dev->notify_peers);

                /* bt_6lowpan_del_conn() is called with the hci dev lock
                 * held, which means that we must delete the netdevice in
                 * a worker thread.
                 */
                INIT_WORK(&entry->delete_netdev, delete_netdev);
                schedule_work(&entry->delete_netdev);
        } else {
                write_unlock_irqrestore(&devices_lock, flags);
        }

        return err;
}

static int device_event(struct notifier_block *unused,
                        unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct lowpan_dev *entry, *tmp;
        unsigned long flags;

        if (netdev->type != ARPHRD_6LOWPAN)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UNREGISTER:
                write_lock_irqsave(&devices_lock, flags);
                list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
                                         list) {
                        if (entry->netdev == netdev) {
                                list_del(&entry->list);
                                kfree(entry);
                                break;
                        }
                }
                write_unlock_irqrestore(&devices_lock, flags);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
        .notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
        return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
        unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}