/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "6lowpan.h"

#include <net/6lowpan.h> /* for the compression support */
#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
/* The devices list contains those devices that we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to
 * BT device is done via l2cap layer. There exists one
 * virtual device / one BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);
struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};
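/* Locking note: both bt_6lowpan_devices and every per-device peers list
 * are guarded by the single devices_lock rwlock; peer_count mirrors the
 * length of the peers list so the disconnect path can detect "last peer
 * gone" without re-walking the list.
 */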
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}
static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}
static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}
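/* Decompress an IPHC-encoded datagram. The shared 6LoWPAN code needs the
 * link-layer addresses the compression context is checked against: the
 * peer's EUI-64 as source and our interface address as destination, both
 * handed over as 8-byte IEEE802154_ADDR_LONG addresses.
 */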
static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
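/* Dispatch on the first payload byte: LOWPAN_DISPATCH_IPV6 announces an
 * uncompressed IPv6 header that can be passed up nearly as-is, while the
 * 011xxxxx pattern (LOWPAN_DISPATCH_IPHC) marks an IPHC-compressed
 * datagram that has to go through process_data() first.
 */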
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}
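/* Copy msg into skb, fragmenting to the connection MTU. The skb passed
 * in already carries the L2CAP header; continuation fragments are plain
 * data buffers chained on its frag_list. Returns bytes consumed or a
 * negative error.
 */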
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg  += count;
	len  -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;

	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg  += count;
		len  -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}
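/* Build a single L2CAP PDU: a basic L2CAP header carrying the 6LoWPAN
 * fixed channel CID, followed by the (possibly fragmented) datagram. The
 * full payload length goes into the header; count is merely what fits in
 * the first fragment.
 */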
static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This mtu check should be not needed and atm is only used for
	 * testing purposes
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}
static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}
static u8 get_addr_type_from_eui64(u8 byte)
{
	/* Is universal(0) or local(1) bit */
	if (byte & 0x02)
		return ADDR_LE_DEV_RANDOM;

	return ADDR_LE_DEV_PUBLIC;
}
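/* A bdaddr_t keeps the Bluetooth address in little-endian order, while
 * the IPv6 interface identifier carries it big-endian around the
 * inserted 0xFF:0xFE bytes, hence the byte reversal here (the inverse of
 * set_addr() further below).
 */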
static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}
static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from IPv6 address
	 * so that we get the proper address and type of the BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}
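/* Called by the netdev core to build the "hardware header". For BT
 * 6LoWPAN this is where IPHC compression happens: the destination peer
 * is resolved from the IPv6 destination address and cached in the skb
 * control buffer so that bt_xmit() can pick the right connection later.
 */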
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &hdr->daddr);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}
/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}
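/* BT LE has no link-layer multicast, so multicast delivery is emulated
 * by unicasting a clone of the packet to every peer behind this
 * interface.
 */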
static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}
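/* Transmit entry point. header_create() stashed the uncompressed IPv6
 * destination in the skb control buffer; recover the peer from it and
 * unicast, or fan out via send_mcast_pkt() for multicast destinations.
 */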
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
		       netdev->name, &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}
static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};
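/* The virtual interface is point-to-point towards the LE peers, carries
 * no link-layer header of its own and uses the IPv6 minimum MTU (1280
 * bytes), the value 6LoWPAN links are expected to provide.
 */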
static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}
static struct device_type bt_type = {
	.name	= "bluetooth",
};
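/* Fold a little-endian BT address into a modified EUI-64 interface
 * identifier: the address bytes reversed around an inserted 0xFF:0xFE,
 * with the universal/local bit forced to match the address type. As a
 * worked example (made-up address), the public bdaddr 00:1B:DC:C0:FF:EE
 * yields the identifier 00:1b:dc:ff:fe:c0:ff:ee.
 */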
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}
static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}
static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}
static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}
static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}
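/* Register a new peer on an existing 6LoWPAN interface. The peer's
 * link-local address is formed as fe80::/64 plus the EUI-64 derived from
 * its BT address, mirroring the RFC 2464 style of identifier formation.
 */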
static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}
/* This gets called when BT LE 6LoWPAN device is connected. We then
 * create network device that acts as a proxy between BT LE device
 * and kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}
static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}
int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!conn || !is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with hci dev lock held which
		 * means that we must delete the netdevice in worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}