drivers/bluetooth/hci_h5.c

/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

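/*
 * Three-wire header layout, as encoded by h5_prepare_pkt() and decoded by
 * the macros above:
 *
 *   hdr[0]  bits 0-2  sequence number (reliable packets only)
 *           bits 3-5  acknowledgement number
 *           bit  6    data integrity check (CRC) follows the payload
 *           bit  7    reliable packet
 *   hdr[1]  bits 0-3  packet type
 *           bits 4-7  payload length, low nibble
 *   hdr[2]            payload length, high byte (12-bit length in total)
 *   hdr[3]            header checksum; the four header bytes must sum to
 *                     0xff (mod 256)
 */
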
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

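/*
 * SLIP framing as used by h5_slip_one_byte()/h5_unslip_one_byte(): each
 * packet is wrapped in 0xc0 delimiters, a literal 0xc0 in the payload is
 * sent as the pair 0xdb 0xdc, and a literal 0xdb is sent as 0xdb 0xdd.
 */
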
/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */
	struct hci_uart *hu;		/* Parent HCI UART */

	u8 tx_seq;			/* Next seq number to send */
	u8 tx_ack;			/* Next ack number to send */
	u8 tx_win;			/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;
	const char *id;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
};

static void h5_reset_rx(struct h5 *h5);

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}

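/*
 * Retransmission/sync timer: while the link is still being established it
 * periodically resends the SYNC or CONFIG request, and once the link is
 * active it re-queues any unacknowledged packets onto the reliable queue
 * so they get retransmitted (the ack timeout).
 */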
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}

static int h5_setup(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);

	return 0;
}

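/*
 * Drop packets from the unack queue that the peer has acknowledged
 * (according to the last received ack number, rx_ack), and stop the
 * retransmission timer once the queue is empty.
 */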
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

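/*
 * Handle Three-wire link-control messages (sync/config requests and
 * responses plus the sleep/wakeup/woken messages). The sync and config
 * exchanges drive the link state from H5_UNINITIALIZED through
 * H5_INITIALIZED to H5_ACTIVE.
 */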
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

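/*
 * Receive path: rx_func implements a per-byte state machine that walks
 * h5_rx_delimiter -> h5_rx_pkt_start -> h5_rx_3wire_hdr -> h5_rx_payload
 * (-> h5_rx_crc when the header announces a CRC), with h5_reset_rx()
 * returning it to the delimiter-hunting state on any error.
 */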
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

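/*
 * Queue outgoing HCI packets: ACL data and commands go on the reliable
 * queue (sequence numbered and retransmitted until acked), SCO data on
 * the unreliable queue.
 */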
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

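/*
 * Pick the next frame to transmit: send a wakeup request if the peer is
 * asleep, otherwise prefer the unreliable queue, then the reliable queue
 * (limited by the tx_win sliding window; the original skb stays on the
 * unack queue until acknowledged), and finally a standalone ack packet
 * if one is pending.
 */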
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

static int h5_serdev_probe(struct serdev_device *serdev)
{
	const struct acpi_device_id *match;
	struct device *dev = &serdev->dev;
	struct h5 *h5;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		h5->vnd = (const struct h5_vnd *)match->driver_data;
		h5->id  = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	}

	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device(&h5->serdev_hu, &h5p);
}

static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}

#ifdef CONFIG_BT_HCIUART_RTL
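/*
 * Realtek-specific setup: initialize the btrtl helper, query the UART
 * settings the firmware expects, switch the controller to the new baud
 * rate with the vendor HCI command 0xfc17, reconfigure the local serdev
 * to match, and then download the firmware.
 */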
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}
	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);

	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}

static void h5_btrtl_open(struct h5 *h5)
{
	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}

static void h5_btrtl_close(struct h5 *h5)
{
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};

static struct h5_vnd rtl_vnd = {
	.setup		= h5_btrtl_setup,
	.open		= h5_btrtl_open,
	.close		= h5_btrtl_close,
	.acpi_gpio_map	= acpi_btrtl_gpios,
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA8723", (kernel_ulong_t)&rtl_vnd },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
	},
};

int __init h5_init(void)
{
	serdev_device_driver_register(&h5_serdev_driver);
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}