/*
 *  Bluetooth Software UART Qualcomm protocol
 *
 *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 *  protocol extension to H4.
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
 *
 *  This file is based on hci_ll.c, which was...
 *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
 *  which was in turn based on hci_h4.c, which was written
 *  by Maxim Krasnyansky and Marcel Holtmann.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"
/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300

#define SUSCLK_RATE_32KHZ	32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE	0x2EDC
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};
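
/* Overview of the IBS state machines driven by the code below:
 *
 * TX (host-initiated): HCI_IBS_TX_ASLEEP --qca_enqueue()--> HCI_IBS_TX_WAKING
 *   --HCI_IBS_WAKE_ACK received--> HCI_IBS_TX_AWAKE
 *   --tx idle timeout, HCI_IBS_SLEEP_IND sent--> HCI_IBS_TX_ASLEEP
 *
 * RX (device-initiated): HCI_IBS_RX_ASLEEP --HCI_IBS_WAKE_IND received-->
 *   HCI_IBS_RX_AWAKE --HCI_IBS_SLEEP_IND received--> HCI_IBS_RX_ASLEEP
 *
 * Whichever side is awake holds a clock vote so the UART clock stays on.
 */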
struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;

	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};
enum qca_speed_type {
	QCA_INIT_SPEED = 1,
	QCA_OPER_SPEED
};
/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;
	unsigned int min_uV;
	unsigned int max_uV;
	unsigned int load_uA;
};

struct qca_vreg_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
};
/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	const struct qca_vreg_data *vreg_data;
	struct regulator_bulk_data *vreg_bulk;
	bool vregs_on;
};

struct qca_serdev {
	struct hci_uart	serdev_hu;
	struct gpio_desc *bt_en;
	struct clk *susclk;
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
};
static int qca_power_setup(struct hci_uart *hu, bool on);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be enabled on the
	 * client side to save power, or need manual work to do so.
	 * Put the code that controls the UART clock here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be disabled on the
	 * client side to save power, or need manual work to do so.
	 * Put the code that turns the UART clock off here if needed.
	 */
}
/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}
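
/* Example usage (see the workqueue handlers below): a path that needs the
 * UART for reception votes its clock on with
 *
 *	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 *
 * and the clock is only really released once both the TX and RX votes are
 * off, since new_vote is the OR of qca->tx_vote and qca->rx_vote.
 */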
/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	return err;
}
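
/* Note: IBS messages bypass qca_enqueue(), so no H4 packet-type byte is
 * prepended; the single bytes 0xFE/0xFD/0xFC defined above go out on the
 * UART as-is, interleaved with regular H4 traffic on txq.
 */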
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
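
/* Host-initiated wake-up sequence: qca_enqueue() parks the outgoing frame on
 * tx_wait_q, moves tx_ibs_state to HCI_IBS_TX_WAKING and schedules this work.
 * The work votes the TX clock on, sends HCI_IBS_WAKE_IND and arms
 * wake_retrans_timer; device_woke_up() completes the handshake when the
 * controller answers with HCI_IBS_WAKE_ACK.
 */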
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
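
/* Host-initiated sleep: after tx_idle_delay ms without TX activity the timer
 * above sends HCI_IBS_SLEEP_IND, marks the TX side asleep and drops the TX
 * clock vote from the workqueue (ws_tx_vote_off).
 */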
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}
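
/* The WAKE_IND is re-sent every wake_retrans ms until the controller answers
 * with HCI_IBS_WAKE_ACK; device_woke_up() then deletes this timer.
 */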
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;
	int ret;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->btsoc_type != QCA_WCN3990) {
			gpiod_set_value_cansleep(qcadev->bt_en, 1);
		} else {
			hu->init_speed = qcadev->init_speed;
			hu->oper_speed = qcadev->oper_speed;
			ret = qca_power_setup(hu, true);
			if (ret) {
				destroy_workqueue(qca->workqueue);
				kfree_skb(qca->rx_skb);
				hu->priv = NULL;
				kfree(qca);
				return ret;
			}
		}
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}
static int qca_close(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->btsoc_type == QCA_WCN3990)
			qca_power_shutdown(hu);
		else
			gpiod_set_value_cansleep(qcadev->bt_en, 0);
	}

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}
/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have turned it off since
		 * receiving the wake-up indication; wake the rx clock from
		 * the workqueue.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		/* Fall through */

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Don't go to sleep in middle of patch download or
	 * Out-Of-Band(GPIOs control) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}
static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}
static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* We receive controller debug logs from the chip as ACL packets.
	 * Instead of handing them to the ACL data path for decoding,
	 * push them up to the higher layers as diagnostic packets.
	 */
	if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
		return hci_recv_diag(hdev, skb);

	return hci_recv_frame(hdev, skb);
}
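
/* The check above reads the first 16 bits of the ACL header little-endian
 * (the connection handle field): a value of QCA_DEBUG_HANDLE (0x2EDC) marks
 * the frame as a controller debug log, which goes to hci_recv_diag() instead
 * of the regular ACL data path.
 */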
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = qca_recv_acl_data },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
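
/* h4_recv_buf() matches the first byte of each received frame against the
 * .type values in this table, so the IBS control bytes are treated as their
 * own zero-payload "packet types" and dispatched straight to the qca_ibs_*
 * handlers, while ordinary ACL/SCO/event traffic goes to the hci core.
 */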
static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}
static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}
static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3200000:
		return QCA_BAUDRATE_3200000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	struct qca_serdev *qcadev;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3200000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to allocate baudrate packet");
		return -ENOMEM;
	}

	/* Disabling hardware flow control is mandatory while
	 * sending change baudrate request to wcn3990 SoC.
	 */
	qcadev = serdev_device_get_drvdata(hu->serdev);
	if (qcadev->btsoc_type == QCA_WCN3990)
		hci_uart_set_flow_control(hu, true);

	/* Assign commands to change baudrate and packet type. */
	skb_put_data(skb, cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait 300 ms for the controller to switch to the new baudrate;
	 * the controller only responds to this HCI command after switching,
	 * and then the host can talk to it at the new rate.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
	set_current_state(TASK_RUNNING);

	if (qcadev->btsoc_type == QCA_WCN3990)
		hci_uart_set_flow_control(hu, false);

	return 0;
}
static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
	if (hu->serdev)
		serdev_device_set_baudrate(hu->serdev, speed);
	else
		hci_uart_set_baudrate(hu, speed);
}
static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;

	/* These power pulses are single-byte commands sent to the wcn3990 at
	 * a specific baudrate. An external circuit on the wcn3990 TX pin
	 * decodes a pulse sent at that rate. The wcn3990 shares an RF COEX
	 * antenna and the same power inputs between Wi-Fi and BT, so powering
	 * up the supplies alone does not enable BT; it only comes up after a
	 * power-on pulse sent at 115200 bps. This scheme helps save power.
	 * Disabling hardware flow control is mandatory while sending power
	 * pulses to the SoC.
	 */
	bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hci_uart_set_flow_control(hu, true);

	skb_put_u8(skb, cmd);
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait 100 us for the SoC to settle down */
	usleep_range(100, 200);
	hci_uart_set_flow_control(hu, false);

	return 0;
}
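
/* Callers pair this with a baudrate change: qca_wcn3990_init() sends the
 * power-off pulse at 2400 bps and the power-on pulse at the init speed
 * (115200 bps), matching the pulse decoder described in the comment above.
 */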
static unsigned int qca_get_speed(struct hci_uart *hu,
				  enum qca_speed_type speed_type)
{
	unsigned int speed = 0;

	if (speed_type == QCA_INIT_SPEED) {
		if (hu->init_speed)
			speed = hu->init_speed;
		else if (hu->proto->init_speed)
			speed = hu->proto->init_speed;
	} else {
		if (hu->oper_speed)
			speed = hu->oper_speed;
		else if (hu->proto->oper_speed)
			speed = hu->proto->oper_speed;
	}

	return speed;
}
static int qca_check_speeds(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;

	qcadev = serdev_device_get_drvdata(hu->serdev);
	if (qcadev->btsoc_type == QCA_WCN3990) {
		if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
		    !qca_get_speed(hu, QCA_OPER_SPEED))
			return -EINVAL;
	} else {
		if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
		    !qca_get_speed(hu, QCA_OPER_SPEED))
			return -EINVAL;
	}

	return 0;
}
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
	unsigned int speed, qca_baudrate;
	int ret = 0;

	if (speed_type == QCA_INIT_SPEED) {
		speed = qca_get_speed(hu, QCA_INIT_SPEED);
		if (speed)
			host_set_baudrate(hu, speed);
	} else {
		speed = qca_get_speed(hu, QCA_OPER_SPEED);
		if (!speed)
			return 0;

		qca_baudrate = qca_get_baudrate_value(speed);
		bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
		ret = qca_set_baudrate(hu->hdev, qca_baudrate);
		if (ret)
			return ret;

		host_set_baudrate(hu, speed);
	}

	return ret;
}
static int qca_wcn3990_init(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_serdev *qcadev;
	int ret;

	/* Check the vregs status; an earlier hci down may have turned
	 * off the voltage regulators.
	 */
	qcadev = serdev_device_get_drvdata(hu->serdev);
	if (!qcadev->bt_power->vregs_on) {
		serdev_device_close(hu->serdev);
		ret = qca_power_setup(hu, true);
		if (ret)
			return ret;

		ret = serdev_device_open(hu->serdev);
		if (ret) {
			bt_dev_err(hu->hdev, "failed to open port");
			return ret;
		}
	}

	/* Forcefully make the wcn3990 enter boot mode. */
	host_set_baudrate(hu, 2400);
	ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
	if (ret)
		return ret;

	qca_set_speed(hu, QCA_INIT_SPEED);
	ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
	if (ret)
		return ret;

	/* Wait 100 ms for the SoC to boot */
	msleep(100);

	/* Now the device is ready to communicate with the host.
	 * To sync the host with the device we need to reopen the port;
	 * without this, RTS and CTS get out of sync.
	 */
	serdev_device_close(hu->serdev);
	ret = serdev_device_open(hu->serdev);
	if (ret) {
		bt_dev_err(hu->hdev, "failed to open port");
		return ret;
	}

	hci_uart_set_flow_control(hu, false);

	return 0;
}
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	struct qca_serdev *qcadev;
	int ret;
	int soc_ver = 0;

	qcadev = serdev_device_get_drvdata(hu->serdev);

	ret = qca_check_speeds(hu);
	if (ret)
		return ret;

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	if (qcadev->btsoc_type == QCA_WCN3990) {
		bt_dev_info(hdev, "setting up wcn3990");

		/* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
		 * setup for every hci up.
		 */
		set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
		hu->hdev->shutdown = qca_power_off;
		ret = qca_wcn3990_init(hu);
		if (ret)
			return ret;

		ret = qca_read_soc_version(hdev, &soc_ver);
		if (ret)
			return ret;
	} else {
		bt_dev_info(hdev, "ROME setup");
		qca_set_speed(hu, QCA_INIT_SPEED);
	}

	/* Setup user speed if needed */
	speed = qca_get_speed(hu, QCA_OPER_SPEED);
	if (speed) {
		ret = qca_set_speed(hu, QCA_OPER_SPEED);
		if (ret)
			return ret;

		qca_baudrate = qca_get_baudrate_value(speed);
	}

	if (qcadev->btsoc_type != QCA_WCN3990) {
		/* Get QCA version information */
		ret = qca_read_soc_version(hdev, &soc_ver);
		if (ret)
			return ret;
	}

	bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
	/* Setup patch / NVM configurations */
	ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		ret = 0;
	} else if (ret == -EAGAIN) {
		/*
		 * Userspace firmware loader will return -EAGAIN in case no
		 * patch/nvm-config is found, so run with original fw/config.
		 */
		ret = 0;
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}
static struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};
static const struct qca_vreg_data qca_soc_data = {
	.soc_type = QCA_WCN3990,
	.vregs = (struct qca_vreg []) {
		{ "vddio",  1800000, 1900000,  15000 },
		{ "vddxo",  1800000, 1900000,  80000 },
		{ "vddrf",  1300000, 1350000, 300000 },
		{ "vddch0", 3300000, 3400000, 450000 },
	},
	.num_vregs = 4,
};
static void qca_power_shutdown(struct hci_uart *hu)
{
	struct serdev_device *serdev = hu->serdev;
	unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;

	host_set_baudrate(hu, 2400);
	hci_uart_set_flow_control(hu, true);
	serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
	hci_uart_set_flow_control(hu, false);
	qca_power_setup(hu, false);
}
static int qca_power_off(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	qca_power_shutdown(hu);
	return 0;
}
static int qca_enable_regulator(struct qca_vreg vregs,
				struct regulator *regulator)
{
	int ret;

	ret = regulator_set_voltage(regulator, vregs.min_uV,
				    vregs.max_uV);
	if (ret)
		return ret;

	if (vregs.load_uA)
		ret = regulator_set_load(regulator,
					 vregs.load_uA);
	if (ret)
		return ret;

	return regulator_enable(regulator);
}

static void qca_disable_regulator(struct qca_vreg vregs,
				  struct regulator *regulator)
{
	regulator_disable(regulator);
	regulator_set_voltage(regulator, 0, vregs.max_uV);
	if (vregs.load_uA)
		regulator_set_load(regulator, 0);
}
static int qca_power_setup(struct hci_uart *hu, bool on)
{
	struct qca_vreg *vregs;
	struct regulator_bulk_data *vreg_bulk;
	struct qca_serdev *qcadev;
	int i, num_vregs, ret = 0;

	qcadev = serdev_device_get_drvdata(hu->serdev);
	if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
	    !qcadev->bt_power->vreg_bulk)
		return -EINVAL;

	vregs = qcadev->bt_power->vreg_data->vregs;
	vreg_bulk = qcadev->bt_power->vreg_bulk;
	num_vregs = qcadev->bt_power->vreg_data->num_vregs;
	BT_DBG("on: %d", on);
	if (on && !qcadev->bt_power->vregs_on) {
		for (i = 0; i < num_vregs; i++) {
			ret = qca_enable_regulator(vregs[i],
						   vreg_bulk[i].consumer);
			if (ret)
				break;
		}

		if (ret) {
			BT_ERR("failed to enable regulator:%s", vregs[i].name);
			/* turn off regulators which are enabled */
			for (i = i - 1; i >= 0; i--)
				qca_disable_regulator(vregs[i],
						      vreg_bulk[i].consumer);
		} else {
			qcadev->bt_power->vregs_on = true;
		}
	} else if (!on && qcadev->bt_power->vregs_on) {
		/* turn off regulator in reverse order */
		i = qcadev->bt_power->vreg_data->num_vregs - 1;
		for ( ; i >= 0; i--)
			qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);

		qcadev->bt_power->vregs_on = false;
	}

	return ret;
}
static int qca_init_regulators(struct qca_power *qca,
			       const struct qca_vreg *vregs, size_t num_vregs)
{
	int i;

	qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
				      sizeof(struct regulator_bulk_data),
				      GFP_KERNEL);
	if (!qca->vreg_bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		qca->vreg_bulk[i].supply = vregs[i].name;

	return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
}
static int qca_serdev_probe(struct serdev_device *serdev)
{
	struct qca_serdev *qcadev;
	const struct qca_vreg_data *data;
	int err;

	qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
	if (!qcadev)
		return -ENOMEM;

	qcadev->serdev_hu.serdev = serdev;
	data = of_device_get_match_data(&serdev->dev);
	serdev_device_set_drvdata(serdev, qcadev);
	if (data && data->soc_type == QCA_WCN3990) {
		qcadev->btsoc_type = QCA_WCN3990;
		qcadev->bt_power = devm_kzalloc(&serdev->dev,
						sizeof(struct qca_power),
						GFP_KERNEL);
		if (!qcadev->bt_power)
			return -ENOMEM;

		qcadev->bt_power->dev = &serdev->dev;
		qcadev->bt_power->vreg_data = data;
		err = qca_init_regulators(qcadev->bt_power, data->vregs,
					  data->num_vregs);
		if (err) {
			BT_ERR("Failed to init regulators:%d", err);
			goto out;
		}

		qcadev->bt_power->vregs_on = false;

		device_property_read_u32(&serdev->dev, "max-speed",
					 &qcadev->oper_speed);
		if (!qcadev->oper_speed)
			BT_DBG("UART will pick default operating speed");

		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
		if (err) {
			BT_ERR("wcn3990 serdev registration failed");
			goto out;
		}
	} else {
		qcadev->btsoc_type = QCA_ROME;
		qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
					       GPIOD_OUT_LOW);
		if (IS_ERR(qcadev->bt_en)) {
			dev_err(&serdev->dev, "failed to acquire enable gpio\n");
			return PTR_ERR(qcadev->bt_en);
		}

		qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
		if (IS_ERR(qcadev->susclk)) {
			dev_err(&serdev->dev, "failed to acquire clk\n");
			return PTR_ERR(qcadev->susclk);
		}

		err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
		if (err)
			return err;

		err = clk_prepare_enable(qcadev->susclk);
		if (err)
			return err;

		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
		if (err)
			clk_disable_unprepare(qcadev->susclk);
	}

out:	return err;
}
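
/* Two probe paths: a wcn3990 node carries match data (qca_soc_data) and is
 * powered through the regulator framework, while a ROME-type device is
 * handled with an "enable" GPIO plus a 32.768 kHz susclk.
 */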
static void qca_serdev_remove(struct serdev_device *serdev)
{
	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);

	if (qcadev->btsoc_type == QCA_WCN3990)
		qca_power_shutdown(&qcadev->serdev_hu);
	else
		clk_disable_unprepare(qcadev->susclk);

	hci_uart_unregister_device(&qcadev->serdev_hu);
}
static const struct of_device_id qca_bluetooth_of_match[] = {
	{ .compatible = "qcom,qca6174-bt" },
	{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
static struct serdev_device_driver qca_serdev_driver = {
	.probe = qca_serdev_probe,
	.remove = qca_serdev_remove,
	.driver = {
		.name = "hci_uart_qca",
		.of_match_table = qca_bluetooth_of_match,
	},
};
int __init qca_init(void)
{
	serdev_device_driver_register(&qca_serdev_driver);

	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	serdev_device_driver_unregister(&qca_serdev_driver);

	return hci_uart_unregister_proto(&qca_proto);
}