/*
 *  Bluetooth Software UART Qualcomm protocol
 *
 *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 *  protocol extension to H4.
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
 *
 *  This file is based on hci_ll.c, which was...
 *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
 *  which was in turn based on hci_h4.c, which was written
 *  by Maxim Krasnyansky and Marcel Holtmann.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10
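
/*
 * Each HCI_IBS message is a single H4 packet-type byte (0xFE, 0xFD or
 * 0xFC) with no payload: a side that wants to transmit sends WAKE_IND and
 * waits for WAKE_ACK before flushing its queued frames, and a side whose
 * transmit path has gone idle sends SLEEP_IND so the peer can drop its
 * UART clock votes.
 */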

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300
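
/*
 * WAKE_IND is retransmitted every IBS_WAKE_RETRANS_TIMEOUT_MS until the
 * controller answers with WAKE_ACK; the transmit side is considered idle
 * (and SLEEP_IND sent) after IBS_TX_IDLE_TIMEOUT_MS without traffic;
 * BAUDRATE_SETTLE_TIMEOUT_MS is how long qca_set_baudrate() waits for the
 * controller to switch to a new UART speed.
 */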

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_ibs_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state */
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;

	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_on;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the host to enable the UART clock
	 * here to save power, or need other manual intervention.
	 * Put chipset-specific UART clock-on handling here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the host to disable the UART clock
	 * here to save power, or need other manual intervention.
	 * Put chipset-specific UART clock-off handling here if needed.
	 */
}
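
/*
 * Serial clock voting: the TX and RX paths each cast an independent vote,
 * and the UART clock is requested on whenever at least one of them votes
 * on (old_vote/new_vote below are tx_vote | rx_vote).
 * HCI_IBS_VOTE_STATS_UPDATE only folds the elapsed time into the on/off
 * accounting without changing either vote.
 */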

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (!old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	*skb_put(skb, 1) = cmd;

	skb_queue_tail(&qca->txq, skb);

	return 0;
}

static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that the message is queued to the tty driver, vote for the
	 * tty clocks to go off. It is up to the tty driver to hold the
	 * clock-off request until its tx is done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

static void hci_ibs_wake_retrans_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to retransmit WAKE to device");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = create_singlethread_workqueue("qca_wq");
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	init_timer(&qca->wake_retrans_timer);
	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
	qca->wake_retrans_timer.data = (u_long)hu;
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	init_timer(&qca->tx_idle_timer);
	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
	qca->tx_idle_timer.data = (u_long)hu;
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
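
/*
 * With debugfs mounted in the usual place, the entries created above show
 * up under e.g. /sys/kernel/debug/bluetooth/hci0/ibs/ (tx_ibs_state,
 * ibs_sent_wakes, vote_on_ms, ...); wake_retrans and tx_idle_delay are
 * writable there, so the IBS timeouts can be tuned at run time.
 */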

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}
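
/*
 * RX side state machine, driven by messages from the controller:
 *
 *   HCI_IBS_RX_ASLEEP --WAKE_IND--> HCI_IBS_RX_AWAKE
 *       (ws_awake_rx votes the rx clock on and sends WAKE_ACK)
 *   HCI_IBS_RX_AWAKE --SLEEP_IND--> HCI_IBS_RX_ASLEEP
 *       (ws_rx_vote_off drops the rx clock vote)
 */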

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have voted it off since
		 * the last activity. The rx clock vote and the WAKE_ACK are
		 * handled from the workqueue.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
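
/*
 * TX side state machine, driven by the host's transmit queue:
 *
 *   HCI_IBS_TX_ASLEEP --frame queued--> HCI_IBS_TX_WAKING
 *       (ws_awake_device votes the tx clock on, sends WAKE_IND and starts
 *        the wake retransmit timer)
 *   HCI_IBS_TX_WAKING --WAKE_ACK--> HCI_IBS_TX_AWAKE
 *       (pending frames move from tx_wait_q to txq, tx idle timer starts)
 *   HCI_IBS_TX_AWAKE --idle timeout--> HCI_IBS_TX_ASLEEP
 *       (SLEEP_IND is sent and the tx clock vote is dropped)
 */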

/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* Don't go to sleep in the middle of a patch download, or when
	 * Out-Of-Band (GPIO-controlled) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
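
/*
 * h4_recv_buf() in qca_recv() below reassembles the incoming byte stream
 * using this table: the leading packet-type byte selects an entry, normal
 * ACL/SCO/event traffic is handed to hci_recv_frame(), and the three IBS
 * type bytes are routed to the qca_ibs_* handlers instead.
 */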

static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}

static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}

static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}
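
/*
 * The QCA_BAUDRATE_* values above are the controller's one-byte baudrate
 * codes (defined with the other QCA ROME definitions used by
 * qca_uart_setup_rome()), not raw bit rates; the selected code is carried
 * as the single parameter byte of the vendor command built below.
 */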

static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	/* H4 command indicator, vendor-specific opcode 0xfc48, one
	 * parameter byte carrying the QCA baudrate code.
	 */
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3000000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait ~300 ms for the controller to switch to the new baudrate;
	 * only after it has processed this command can the host talk to it
	 * at the new rate.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
	set_current_state(TASK_INTERRUPTIBLE);

	return 0;
}

static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}

static struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};

int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}